Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (104 commits) IB/iser: Don't change itt endianness IB/mlx4: Update module version and release date IPoIB: Handle case when P_Key is deleted and re-added at same index IB/iser: Release connection resources on RDMA_CM_EVENT_DEVICE_REMOVAL event IB/mlx4: Fix incorrect comment IB/mlx4: Fix race when detaching a QP from a multicast group IB/ehca: Support all ibv_devinfo values in query_device() and query_port() RDMA/nes: Free IRQ before killing tasklet IB/mthca: Update module version and release date IB/mlx4: Update QP state if query QP succeeds IB/mthca: Update QP state if query QP succeeds RDMA/amso1100: Add check for NULL reply_msg in c2_intr() IB/mlx4: Add support for resizing CQs IB/mlx4: Add support for modifying CQ moderation parameters IPoIB: Support modifying IPoIB CQ event moderation IB/core: Add support for modify CQ IPoIB: Add basic ethtool support mlx4_core: Increase max number of QPs to 128K RDMA/amso1100: Add support for "send with invalidate" work requests IB/core: Add support for "send with invalidate" work requests ...
This commit is contained in:
commit
75e98b3415
@ -467,6 +467,31 @@ static int cm_compare_private_data(u8 *private_data,
|
||||
return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Trivial helpers to strip endian annotation and compare; the
|
||||
* endianness doesn't actually matter since we just need a stable
|
||||
* order for the RB tree.
|
||||
*/
|
||||
static int be32_lt(__be32 a, __be32 b)
|
||||
{
|
||||
return (__force u32) a < (__force u32) b;
|
||||
}
|
||||
|
||||
static int be32_gt(__be32 a, __be32 b)
|
||||
{
|
||||
return (__force u32) a > (__force u32) b;
|
||||
}
|
||||
|
||||
static int be64_lt(__be64 a, __be64 b)
|
||||
{
|
||||
return (__force u64) a < (__force u64) b;
|
||||
}
|
||||
|
||||
static int be64_gt(__be64 a, __be64 b)
|
||||
{
|
||||
return (__force u64) a > (__force u64) b;
|
||||
}
|
||||
|
||||
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
|
||||
{
|
||||
struct rb_node **link = &cm.listen_service_table.rb_node;
|
||||
@ -492,9 +517,9 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
|
||||
link = &(*link)->rb_left;
|
||||
else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
|
||||
link = &(*link)->rb_right;
|
||||
else if (service_id < cur_cm_id_priv->id.service_id)
|
||||
else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
|
||||
link = &(*link)->rb_left;
|
||||
else if (service_id > cur_cm_id_priv->id.service_id)
|
||||
else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
|
||||
link = &(*link)->rb_right;
|
||||
else if (data_cmp < 0)
|
||||
link = &(*link)->rb_left;
|
||||
@ -527,9 +552,9 @@ static struct cm_id_private * cm_find_listen(struct ib_device *device,
|
||||
node = node->rb_left;
|
||||
else if (device > cm_id_priv->id.device)
|
||||
node = node->rb_right;
|
||||
else if (service_id < cm_id_priv->id.service_id)
|
||||
else if (be64_lt(service_id, cm_id_priv->id.service_id))
|
||||
node = node->rb_left;
|
||||
else if (service_id > cm_id_priv->id.service_id)
|
||||
else if (be64_gt(service_id, cm_id_priv->id.service_id))
|
||||
node = node->rb_right;
|
||||
else if (data_cmp < 0)
|
||||
node = node->rb_left;
|
||||
@ -552,13 +577,13 @@ static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
|
||||
parent = *link;
|
||||
cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
|
||||
remote_id_node);
|
||||
if (remote_id < cur_timewait_info->work.remote_id)
|
||||
if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
|
||||
link = &(*link)->rb_left;
|
||||
else if (remote_id > cur_timewait_info->work.remote_id)
|
||||
else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
|
||||
link = &(*link)->rb_right;
|
||||
else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
|
||||
else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
|
||||
link = &(*link)->rb_left;
|
||||
else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
|
||||
else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
|
||||
link = &(*link)->rb_right;
|
||||
else
|
||||
return cur_timewait_info;
|
||||
@ -578,13 +603,13 @@ static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
|
||||
while (node) {
|
||||
timewait_info = rb_entry(node, struct cm_timewait_info,
|
||||
remote_id_node);
|
||||
if (remote_id < timewait_info->work.remote_id)
|
||||
if (be32_lt(remote_id, timewait_info->work.remote_id))
|
||||
node = node->rb_left;
|
||||
else if (remote_id > timewait_info->work.remote_id)
|
||||
else if (be32_gt(remote_id, timewait_info->work.remote_id))
|
||||
node = node->rb_right;
|
||||
else if (remote_ca_guid < timewait_info->remote_ca_guid)
|
||||
else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
|
||||
node = node->rb_left;
|
||||
else if (remote_ca_guid > timewait_info->remote_ca_guid)
|
||||
else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
|
||||
node = node->rb_right;
|
||||
else
|
||||
return timewait_info;
|
||||
@ -605,13 +630,13 @@ static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
|
||||
parent = *link;
|
||||
cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
|
||||
remote_qp_node);
|
||||
if (remote_qpn < cur_timewait_info->remote_qpn)
|
||||
if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
|
||||
link = &(*link)->rb_left;
|
||||
else if (remote_qpn > cur_timewait_info->remote_qpn)
|
||||
else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
|
||||
link = &(*link)->rb_right;
|
||||
else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
|
||||
else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
|
||||
link = &(*link)->rb_left;
|
||||
else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
|
||||
else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
|
||||
link = &(*link)->rb_right;
|
||||
else
|
||||
return cur_timewait_info;
|
||||
@ -635,9 +660,9 @@ static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
|
||||
parent = *link;
|
||||
cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
|
||||
sidr_id_node);
|
||||
if (remote_id < cur_cm_id_priv->id.remote_id)
|
||||
if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
|
||||
link = &(*link)->rb_left;
|
||||
else if (remote_id > cur_cm_id_priv->id.remote_id)
|
||||
else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
|
||||
link = &(*link)->rb_right;
|
||||
else {
|
||||
int cmp;
|
||||
@ -2848,7 +2873,7 @@ static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
|
||||
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
|
||||
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
|
||||
sidr_req_msg->request_id = cm_id_priv->id.local_id;
|
||||
sidr_req_msg->pkey = cpu_to_be16(param->path->pkey);
|
||||
sidr_req_msg->pkey = param->path->pkey;
|
||||
sidr_req_msg->service_id = param->service_id;
|
||||
|
||||
if (param->private_data && param->private_data_len)
|
||||
|
@ -1289,7 +1289,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
|
||||
new_cm_id = rdma_create_id(listen_id->id.event_handler,
|
||||
listen_id->id.context,
|
||||
RDMA_PS_TCP);
|
||||
if (!new_cm_id) {
|
||||
if (IS_ERR(new_cm_id)) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
@ -158,8 +158,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
|
||||
#endif
|
||||
}
|
||||
|
||||
list_splice(&pool->dirty_list, &unmap_list);
|
||||
INIT_LIST_HEAD(&pool->dirty_list);
|
||||
list_splice_init(&pool->dirty_list, &unmap_list);
|
||||
pool->dirty_len = 0;
|
||||
|
||||
spin_unlock_irq(&pool->pool_lock);
|
||||
|
@ -614,7 +614,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
|
||||
if (!ctx->cm_id->device)
|
||||
goto out;
|
||||
|
||||
resp.node_guid = ctx->cm_id->device->node_guid;
|
||||
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
|
||||
resp.port_num = ctx->cm_id->port_num;
|
||||
switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
|
||||
case RDMA_TRANSPORT_IB:
|
||||
|
@ -81,13 +81,13 @@ struct ib_uverbs_device {
|
||||
|
||||
struct ib_uverbs_event_file {
|
||||
struct kref ref;
|
||||
struct file *file;
|
||||
struct ib_uverbs_file *uverbs_file;
|
||||
spinlock_t lock;
|
||||
int is_async;
|
||||
wait_queue_head_t poll_wait;
|
||||
struct fasync_struct *async_queue;
|
||||
struct list_head event_list;
|
||||
int is_async;
|
||||
int is_closed;
|
||||
};
|
||||
|
||||
struct ib_uverbs_file {
|
||||
|
@ -1065,6 +1065,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
|
||||
attr.srq = srq;
|
||||
attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
|
||||
attr.qp_type = cmd.qp_type;
|
||||
attr.create_flags = 0;
|
||||
|
||||
attr.cap.max_send_wr = cmd.max_send_wr;
|
||||
attr.cap.max_recv_wr = cmd.max_recv_wr;
|
||||
@ -1462,7 +1463,6 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
|
||||
next->num_sge = user_wr->num_sge;
|
||||
next->opcode = user_wr->opcode;
|
||||
next->send_flags = user_wr->send_flags;
|
||||
next->imm_data = (__be32 __force) user_wr->imm_data;
|
||||
|
||||
if (is_ud) {
|
||||
next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
|
||||
@ -1475,14 +1475,24 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
|
||||
next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
|
||||
} else {
|
||||
switch (next->opcode) {
|
||||
case IB_WR_RDMA_WRITE:
|
||||
case IB_WR_RDMA_WRITE_WITH_IMM:
|
||||
next->ex.imm_data =
|
||||
(__be32 __force) user_wr->ex.imm_data;
|
||||
case IB_WR_RDMA_WRITE:
|
||||
case IB_WR_RDMA_READ:
|
||||
next->wr.rdma.remote_addr =
|
||||
user_wr->wr.rdma.remote_addr;
|
||||
next->wr.rdma.rkey =
|
||||
user_wr->wr.rdma.rkey;
|
||||
break;
|
||||
case IB_WR_SEND_WITH_IMM:
|
||||
next->ex.imm_data =
|
||||
(__be32 __force) user_wr->ex.imm_data;
|
||||
break;
|
||||
case IB_WR_SEND_WITH_INV:
|
||||
next->ex.invalidate_rkey =
|
||||
user_wr->ex.invalidate_rkey;
|
||||
break;
|
||||
case IB_WR_ATOMIC_CMP_AND_SWP:
|
||||
case IB_WR_ATOMIC_FETCH_AND_ADD:
|
||||
next->wr.atomic.remote_addr =
|
||||
|
@ -352,7 +352,7 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
|
||||
struct ib_uverbs_event *entry, *tmp;
|
||||
|
||||
spin_lock_irq(&file->lock);
|
||||
file->file = NULL;
|
||||
file->is_closed = 1;
|
||||
list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
|
||||
if (entry->counter)
|
||||
list_del(&entry->obj_list);
|
||||
@ -390,7 +390,7 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&file->lock, flags);
|
||||
if (!file->file) {
|
||||
if (file->is_closed) {
|
||||
spin_unlock_irqrestore(&file->lock, flags);
|
||||
return;
|
||||
}
|
||||
@ -423,7 +423,7 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&file->async_file->lock, flags);
|
||||
if (!file->async_file->file) {
|
||||
if (!file->async_file->is_closed) {
|
||||
spin_unlock_irqrestore(&file->async_file->lock, flags);
|
||||
return;
|
||||
}
|
||||
@ -509,6 +509,7 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
|
||||
ev_file->uverbs_file = uverbs_file;
|
||||
ev_file->async_queue = NULL;
|
||||
ev_file->is_async = is_async;
|
||||
ev_file->is_closed = 0;
|
||||
|
||||
*fd = get_unused_fd();
|
||||
if (*fd < 0) {
|
||||
@ -516,25 +517,18 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
|
||||
goto err;
|
||||
}
|
||||
|
||||
filp = get_empty_filp();
|
||||
if (!filp) {
|
||||
ret = -ENFILE;
|
||||
goto err_fd;
|
||||
}
|
||||
|
||||
ev_file->file = filp;
|
||||
|
||||
/*
|
||||
* fops_get() can't fail here, because we're coming from a
|
||||
* system call on a uverbs file, which will already have a
|
||||
* module reference.
|
||||
*/
|
||||
filp->f_op = fops_get(&uverbs_event_fops);
|
||||
filp->f_path.mnt = mntget(uverbs_event_mnt);
|
||||
filp->f_path.dentry = dget(uverbs_event_mnt->mnt_root);
|
||||
filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
|
||||
filp->f_flags = O_RDONLY;
|
||||
filp->f_mode = FMODE_READ;
|
||||
filp = alloc_file(uverbs_event_mnt, dget(uverbs_event_mnt->mnt_root),
|
||||
FMODE_READ, fops_get(&uverbs_event_fops));
|
||||
if (!filp) {
|
||||
ret = -ENFILE;
|
||||
goto err_fd;
|
||||
}
|
||||
|
||||
filp->private_data = ev_file;
|
||||
|
||||
return filp;
|
||||
|
@ -248,7 +248,9 @@ int ib_modify_srq(struct ib_srq *srq,
|
||||
struct ib_srq_attr *srq_attr,
|
||||
enum ib_srq_attr_mask srq_attr_mask)
|
||||
{
|
||||
return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
|
||||
return srq->device->modify_srq ?
|
||||
srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
|
||||
-ENOSYS;
|
||||
}
|
||||
EXPORT_SYMBOL(ib_modify_srq);
|
||||
|
||||
@ -628,6 +630,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
|
||||
}
|
||||
EXPORT_SYMBOL(ib_create_cq);
|
||||
|
||||
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
|
||||
{
|
||||
return cq->device->modify_cq ?
|
||||
cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
|
||||
}
|
||||
EXPORT_SYMBOL(ib_modify_cq);
|
||||
|
||||
int ib_destroy_cq(struct ib_cq *cq)
|
||||
{
|
||||
if (atomic_read(&cq->usecnt))
|
||||
@ -672,6 +681,9 @@ struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
|
||||
{
|
||||
struct ib_mr *mr;
|
||||
|
||||
if (!pd->device->reg_phys_mr)
|
||||
return ERR_PTR(-ENOSYS);
|
||||
|
||||
mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
|
||||
mr_access_flags, iova_start);
|
||||
|
||||
|
@ -130,10 +130,10 @@ static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
|
||||
tx_desc->status = 0;
|
||||
|
||||
/* Set TXP_HTXD_UNINIT */
|
||||
__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
|
||||
__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
|
||||
(void __iomem *) txp_desc + C2_TXP_ADDR);
|
||||
__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
|
||||
__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
|
||||
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
|
||||
(void __iomem *) txp_desc + C2_TXP_FLAGS);
|
||||
|
||||
elem->skb = NULL;
|
||||
@ -179,13 +179,13 @@ static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
|
||||
rx_desc->status = 0;
|
||||
|
||||
/* Set RXP_HRXD_UNINIT */
|
||||
__raw_writew(cpu_to_be16(RXP_HRXD_OK),
|
||||
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
|
||||
(void __iomem *) rxp_desc + C2_RXP_STATUS);
|
||||
__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
|
||||
__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
|
||||
__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
|
||||
__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
|
||||
(void __iomem *) rxp_desc + C2_RXP_ADDR);
|
||||
__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
|
||||
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
|
||||
(void __iomem *) rxp_desc + C2_RXP_FLAGS);
|
||||
|
||||
elem->skb = NULL;
|
||||
@ -239,10 +239,11 @@ static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
|
||||
rxp_hdr->flags = RXP_HRXD_READY;
|
||||
|
||||
__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
|
||||
__raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
|
||||
__raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
|
||||
elem->hw_desc + C2_RXP_LEN);
|
||||
__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
|
||||
__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
|
||||
__raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
|
||||
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
|
||||
elem->hw_desc + C2_RXP_FLAGS);
|
||||
|
||||
elem->skb = skb;
|
||||
elem->mapaddr = mapaddr;
|
||||
@ -290,9 +291,9 @@ static void c2_rx_clean(struct c2_port *c2_port)
|
||||
__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
|
||||
__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
|
||||
__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
|
||||
__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
|
||||
__raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
|
||||
elem->hw_desc + C2_RXP_ADDR);
|
||||
__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
|
||||
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
|
||||
elem->hw_desc + C2_RXP_FLAGS);
|
||||
|
||||
if (elem->skb) {
|
||||
@ -346,16 +347,16 @@ static void c2_tx_clean(struct c2_port *c2_port)
|
||||
elem->hw_desc + C2_TXP_LEN);
|
||||
__raw_writeq(0,
|
||||
elem->hw_desc + C2_TXP_ADDR);
|
||||
__raw_writew(cpu_to_be16(TXP_HTXD_DONE),
|
||||
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
|
||||
elem->hw_desc + C2_TXP_FLAGS);
|
||||
c2_port->netstats.tx_dropped++;
|
||||
break;
|
||||
} else {
|
||||
__raw_writew(0,
|
||||
elem->hw_desc + C2_TXP_LEN);
|
||||
__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
|
||||
__raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
|
||||
elem->hw_desc + C2_TXP_ADDR);
|
||||
__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
|
||||
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
|
||||
elem->hw_desc + C2_TXP_FLAGS);
|
||||
}
|
||||
|
||||
@ -390,7 +391,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
|
||||
for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
|
||||
elem = elem->next) {
|
||||
txp_htxd.flags =
|
||||
be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));
|
||||
be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
|
||||
|
||||
if (txp_htxd.flags != TXP_HTXD_DONE)
|
||||
break;
|
||||
@ -398,7 +399,7 @@ static void c2_tx_interrupt(struct net_device *netdev)
|
||||
if (netif_msg_tx_done(c2_port)) {
|
||||
/* PCI reads are expensive in fast path */
|
||||
txp_htxd.len =
|
||||
be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
|
||||
be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
|
||||
pr_debug("%s: tx done slot %3Zu status 0x%x len "
|
||||
"%5u bytes\n",
|
||||
netdev->name, elem - tx_ring->start,
|
||||
@ -448,10 +449,12 @@ static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
|
||||
/* Write the descriptor to the adapter's rx ring */
|
||||
__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
|
||||
__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
|
||||
__raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
|
||||
__raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
|
||||
elem->hw_desc + C2_RXP_LEN);
|
||||
__raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
|
||||
__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);
|
||||
__raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
|
||||
elem->hw_desc + C2_RXP_ADDR);
|
||||
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
|
||||
elem->hw_desc + C2_RXP_FLAGS);
|
||||
|
||||
pr_debug("packet dropped\n");
|
||||
c2_port->netstats.rx_dropped++;
|
||||
@ -653,7 +656,7 @@ static int c2_up(struct net_device *netdev)
|
||||
i++, elem++) {
|
||||
rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
|
||||
rxp_hdr->flags = 0;
|
||||
__raw_writew(cpu_to_be16(RXP_HRXD_READY),
|
||||
__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
|
||||
elem->hw_desc + C2_RXP_FLAGS);
|
||||
}
|
||||
|
||||
@ -787,9 +790,12 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
elem->maplen = maplen;
|
||||
|
||||
/* Tell HW to xmit */
|
||||
__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
|
||||
__raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
|
||||
__raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);
|
||||
__raw_writeq((__force u64) cpu_to_be64(mapaddr),
|
||||
elem->hw_desc + C2_TXP_ADDR);
|
||||
__raw_writew((__force u16) cpu_to_be16(maplen),
|
||||
elem->hw_desc + C2_TXP_LEN);
|
||||
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
|
||||
elem->hw_desc + C2_TXP_FLAGS);
|
||||
|
||||
c2_port->netstats.tx_packets++;
|
||||
c2_port->netstats.tx_bytes += maplen;
|
||||
@ -810,11 +816,11 @@ static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
|
||||
elem->maplen = maplen;
|
||||
|
||||
/* Tell HW to xmit */
|
||||
__raw_writeq(cpu_to_be64(mapaddr),
|
||||
__raw_writeq((__force u64) cpu_to_be64(mapaddr),
|
||||
elem->hw_desc + C2_TXP_ADDR);
|
||||
__raw_writew(cpu_to_be16(maplen),
|
||||
__raw_writew((__force u16) cpu_to_be16(maplen),
|
||||
elem->hw_desc + C2_TXP_LEN);
|
||||
__raw_writew(cpu_to_be16(TXP_HTXD_READY),
|
||||
__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
|
||||
elem->hw_desc + C2_TXP_FLAGS);
|
||||
|
||||
c2_port->netstats.tx_packets++;
|
||||
@ -1005,7 +1011,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
/* Remap the adapter PCI registers in BAR4 */
|
||||
mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
|
||||
sizeof(struct c2_adapter_pci_regs));
|
||||
if (mmio_regs == 0UL) {
|
||||
if (!mmio_regs) {
|
||||
printk(KERN_ERR PFX
|
||||
"Unable to remap adapter PCI registers in BAR4\n");
|
||||
ret = -EIO;
|
||||
@ -1029,10 +1035,10 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
}
|
||||
|
||||
/* Validate the adapter version */
|
||||
if (be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
|
||||
if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
|
||||
printk(KERN_ERR PFX "Version mismatch "
|
||||
"[fw=%u, c2=%u], Adapter not claimed\n",
|
||||
be32_to_cpu(readl(mmio_regs + C2_REGS_VERS)),
|
||||
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
|
||||
C2_VERSION);
|
||||
ret = -EINVAL;
|
||||
iounmap(mmio_regs);
|
||||
@ -1040,12 +1046,12 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
}
|
||||
|
||||
/* Validate the adapter IVN */
|
||||
if (be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
|
||||
if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
|
||||
printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
|
||||
"the OpenIB device support kit. "
|
||||
"[fw=0x%x, c2=0x%x], Adapter not claimed\n",
|
||||
be32_to_cpu(readl(mmio_regs + C2_REGS_IVN)),
|
||||
C2_IVN);
|
||||
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
|
||||
C2_IVN);
|
||||
ret = -EINVAL;
|
||||
iounmap(mmio_regs);
|
||||
goto bail2;
|
||||
@ -1068,7 +1074,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
|
||||
/* Get the last RX index */
|
||||
c2dev->cur_rx =
|
||||
(be32_to_cpu(readl(mmio_regs + C2_REGS_HRX_CUR)) -
|
||||
(be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
|
||||
0xffffc000) / sizeof(struct c2_rxp_desc);
|
||||
|
||||
/* Request an interrupt line for the driver */
|
||||
@ -1090,7 +1096,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
}
|
||||
|
||||
/* Save off the actual size prior to unmapping mmio_regs */
|
||||
kva_map_size = be32_to_cpu(readl(mmio_regs + C2_REGS_PCI_WINSIZE));
|
||||
kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
|
||||
|
||||
/* Unmap the adapter PCI registers in BAR4 */
|
||||
iounmap(mmio_regs);
|
||||
@ -1109,7 +1115,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
/* Remap the adapter HRXDQ PA space to kernel VA space */
|
||||
c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
|
||||
C2_RXP_HRXDQ_SIZE);
|
||||
if (c2dev->mmio_rxp_ring == 0UL) {
|
||||
if (!c2dev->mmio_rxp_ring) {
|
||||
printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
|
||||
ret = -EIO;
|
||||
goto bail6;
|
||||
@ -1118,7 +1124,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
/* Remap the adapter HTXDQ PA space to kernel VA space */
|
||||
c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
|
||||
C2_TXP_HTXDQ_SIZE);
|
||||
if (c2dev->mmio_txp_ring == 0UL) {
|
||||
if (!c2dev->mmio_txp_ring) {
|
||||
printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
|
||||
ret = -EIO;
|
||||
goto bail7;
|
||||
@ -1129,7 +1135,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
|
||||
/* Remap the PCI registers in adapter BAR0 to kernel VA space */
|
||||
c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
|
||||
if (c2dev->regs == 0UL) {
|
||||
if (!c2dev->regs) {
|
||||
printk(KERN_ERR PFX "Unable to remap BAR0\n");
|
||||
ret = -EIO;
|
||||
goto bail8;
|
||||
@ -1139,7 +1145,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
|
||||
c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
|
||||
c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
|
||||
kva_map_size);
|
||||
if (c2dev->kva == 0UL) {
|
||||
if (!c2dev->kva) {
|
||||
printk(KERN_ERR PFX "Unable to remap BAR4\n");
|
||||
ret = -EIO;
|
||||
goto bail9;
|
||||
|
@ -346,7 +346,7 @@ struct c2_dev {
|
||||
// spinlock_t aeq_lock;
|
||||
// spinlock_t rnic_lock;
|
||||
|
||||
u16 *hint_count;
|
||||
__be16 *hint_count;
|
||||
dma_addr_t hint_count_dma;
|
||||
u16 hints_read;
|
||||
|
||||
@ -425,10 +425,10 @@ static inline void __raw_writeq(u64 val, void __iomem * addr)
|
||||
#endif
|
||||
|
||||
#define C2_SET_CUR_RX(c2dev, cur_rx) \
|
||||
__raw_writel(cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
|
||||
__raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
|
||||
|
||||
#define C2_GET_CUR_RX(c2dev) \
|
||||
be32_to_cpu(readl(c2dev->mmio_txp_ring + 4092))
|
||||
be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
|
||||
|
||||
static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
|
||||
{
|
||||
@ -485,8 +485,8 @@ extern void c2_unregister_device(struct c2_dev *c2dev);
|
||||
extern int c2_rnic_init(struct c2_dev *c2dev);
|
||||
extern void c2_rnic_term(struct c2_dev *c2dev);
|
||||
extern void c2_rnic_interrupt(struct c2_dev *c2dev);
|
||||
extern int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
|
||||
extern int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask);
|
||||
extern int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
|
||||
extern int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
|
||||
|
||||
/* QPs */
|
||||
extern int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
|
||||
@ -545,7 +545,7 @@ extern void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
|
||||
extern int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
|
||||
struct sp_chunk **root);
|
||||
extern void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
|
||||
extern u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
|
||||
dma_addr_t *dma_addr, gfp_t gfp_mask);
|
||||
extern void c2_free_mqsp(u16 * mqsp);
|
||||
extern __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
|
||||
dma_addr_t *dma_addr, gfp_t gfp_mask);
|
||||
extern void c2_free_mqsp(__be16* mqsp);
|
||||
#endif
|
||||
|
@ -61,7 +61,7 @@ static int c2_convert_cm_status(u32 c2_status)
|
||||
default:
|
||||
printk(KERN_ERR PFX
|
||||
"%s - Unable to convert CM status: %d\n",
|
||||
__FUNCTION__, c2_status);
|
||||
__func__, c2_status);
|
||||
return -EIO;
|
||||
}
|
||||
}
|
||||
@ -193,9 +193,9 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
|
||||
pr_debug("%s: event = %s, user_context=%llx, "
|
||||
"resource_type=%x, "
|
||||
"resource=%x, qp_state=%s\n",
|
||||
__FUNCTION__,
|
||||
__func__,
|
||||
to_event_str(event_id),
|
||||
(unsigned long long) be64_to_cpu(wr->ae.ae_generic.user_context),
|
||||
(unsigned long long) wr->ae.ae_generic.user_context,
|
||||
be32_to_cpu(wr->ae.ae_generic.resource_type),
|
||||
be32_to_cpu(wr->ae.ae_generic.resource),
|
||||
to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
|
||||
@ -259,7 +259,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
|
||||
BUG_ON(1);
|
||||
pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
|
||||
"CM_ID=%p\n",
|
||||
__FUNCTION__, __LINE__,
|
||||
__func__, __LINE__,
|
||||
event_id, qp, cm_id);
|
||||
break;
|
||||
}
|
||||
@ -276,7 +276,7 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
|
||||
pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
|
||||
if (event_id != CCAE_CONNECTION_REQUEST) {
|
||||
pr_debug("%s: Invalid event_id: %d\n",
|
||||
__FUNCTION__, event_id);
|
||||
__func__, event_id);
|
||||
break;
|
||||
}
|
||||
cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
|
||||
|
@ -87,8 +87,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
|
||||
}
|
||||
}
|
||||
|
||||
u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
|
||||
dma_addr_t *dma_addr, gfp_t gfp_mask)
|
||||
__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
|
||||
dma_addr_t *dma_addr, gfp_t gfp_mask)
|
||||
{
|
||||
u16 mqsp;
|
||||
|
||||
@ -113,14 +113,14 @@ u16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
|
||||
*dma_addr = head->dma_addr +
|
||||
((unsigned long) &(head->shared_ptr[mqsp]) -
|
||||
(unsigned long) head);
|
||||
pr_debug("%s addr %p dma_addr %llx\n", __FUNCTION__,
|
||||
pr_debug("%s addr %p dma_addr %llx\n", __func__,
|
||||
&(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
|
||||
return &(head->shared_ptr[mqsp]);
|
||||
return (__force __be16 *) &(head->shared_ptr[mqsp]);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void c2_free_mqsp(u16 * mqsp)
|
||||
void c2_free_mqsp(__be16 *mqsp)
|
||||
{
|
||||
struct sp_chunk *head;
|
||||
u16 idx;
|
||||
@ -129,7 +129,7 @@ void c2_free_mqsp(u16 * mqsp)
|
||||
head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
|
||||
|
||||
/* Link head to new mqsp */
|
||||
*mqsp = head->head;
|
||||
*mqsp = (__force __be16) head->head;
|
||||
|
||||
/* Compute the shared_ptr index */
|
||||
idx = ((unsigned long) mqsp & ~PAGE_MASK) >> 1;
|
||||
|
@ -422,8 +422,8 @@ void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
|
||||
goto bail1;
|
||||
|
||||
reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
|
||||
|
||||
vq_repbuf_free(c2dev, reply);
|
||||
if (reply)
|
||||
vq_repbuf_free(c2dev, reply);
|
||||
bail1:
|
||||
vq_req_free(c2dev, vq_req);
|
||||
bail0:
|
||||
|
@ -174,7 +174,11 @@ static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
|
||||
return;
|
||||
}
|
||||
|
||||
err = c2_errno(reply_msg);
|
||||
if (reply_msg)
|
||||
err = c2_errno(reply_msg);
|
||||
else
|
||||
err = -ENOMEM;
|
||||
|
||||
if (!err) switch (req->event) {
|
||||
case IW_CM_EVENT_ESTABLISHED:
|
||||
c2_set_qp_state(req->qp,
|
||||
|
@ -45,7 +45,7 @@
|
||||
* Reply buffer _is_ freed by this function.
|
||||
*/
|
||||
static int
|
||||
send_pbl_messages(struct c2_dev *c2dev, u32 stag_index,
|
||||
send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
|
||||
unsigned long va, u32 pbl_depth,
|
||||
struct c2_vq_req *vq_req, int pbl_type)
|
||||
{
|
||||
|
@ -64,7 +64,7 @@ void c2_mq_produce(struct c2_mq *q)
|
||||
q->priv = (q->priv + 1) % q->q_size;
|
||||
q->hint_count++;
|
||||
/* Update peer's offset. */
|
||||
__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
|
||||
__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
|
||||
}
|
||||
}
|
||||
|
||||
@ -105,7 +105,7 @@ void c2_mq_free(struct c2_mq *q)
|
||||
#endif
|
||||
q->priv = (q->priv + 1) % q->q_size;
|
||||
/* Update peer's offset. */
|
||||
__raw_writew(cpu_to_be16(q->priv), &q->peer->shared);
|
||||
__raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -75,7 +75,7 @@ struct c2_mq {
|
||||
u16 hint_count;
|
||||
u16 priv;
|
||||
struct c2_mq_shared __iomem *peer;
|
||||
u16 *shared;
|
||||
__be16 *shared;
|
||||
dma_addr_t shared_dma;
|
||||
u32 q_size;
|
||||
u32 msg_size;
|
||||
|
@ -67,7 +67,7 @@ static int c2_query_device(struct ib_device *ibdev,
|
||||
{
|
||||
struct c2_dev *c2dev = to_c2dev(ibdev);
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
*props = c2dev->props;
|
||||
return 0;
|
||||
@ -76,7 +76,7 @@ static int c2_query_device(struct ib_device *ibdev,
|
||||
static int c2_query_port(struct ib_device *ibdev,
|
||||
u8 port, struct ib_port_attr *props)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
props->max_mtu = IB_MTU_4096;
|
||||
props->lid = 0;
|
||||
@ -102,14 +102,14 @@ static int c2_modify_port(struct ib_device *ibdev,
|
||||
u8 port, int port_modify_mask,
|
||||
struct ib_port_modify *props)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int c2_query_pkey(struct ib_device *ibdev,
|
||||
u8 port, u16 index, u16 * pkey)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
*pkey = 0;
|
||||
return 0;
|
||||
}
|
||||
@ -119,7 +119,7 @@ static int c2_query_gid(struct ib_device *ibdev, u8 port,
|
||||
{
|
||||
struct c2_dev *c2dev = to_c2dev(ibdev);
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
|
||||
memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
|
||||
|
||||
@ -134,7 +134,7 @@ static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
|
||||
{
|
||||
struct c2_ucontext *context;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
context = kmalloc(sizeof(*context), GFP_KERNEL);
|
||||
if (!context)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
@ -144,14 +144,14 @@ static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
|
||||
|
||||
static int c2_dealloc_ucontext(struct ib_ucontext *context)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
kfree(context);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
@ -162,7 +162,7 @@ static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
|
||||
struct c2_pd *pd;
|
||||
int err;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
pd = kmalloc(sizeof(*pd), GFP_KERNEL);
|
||||
if (!pd)
|
||||
@ -187,7 +187,7 @@ static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
|
||||
|
||||
static int c2_dealloc_pd(struct ib_pd *pd)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
|
||||
kfree(pd);
|
||||
|
||||
@ -196,13 +196,13 @@ static int c2_dealloc_pd(struct ib_pd *pd)
|
||||
|
||||
static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return ERR_PTR(-ENOSYS);
|
||||
}
|
||||
|
||||
static int c2_ah_destroy(struct ib_ah *ah)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
@ -230,7 +230,7 @@ struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
|
||||
|
||||
qp = c2_find_qpn(c2dev, qpn);
|
||||
pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
|
||||
__FUNCTION__, qp, qpn, device,
|
||||
__func__, qp, qpn, device,
|
||||
(qp?atomic_read(&qp->refcount):0));
|
||||
|
||||
return (qp?&qp->ibqp:NULL);
|
||||
@ -243,13 +243,16 @@ static struct ib_qp *c2_create_qp(struct ib_pd *pd,
|
||||
struct c2_qp *qp;
|
||||
int err;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
if (init_attr->create_flags)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
switch (init_attr->qp_type) {
|
||||
case IB_QPT_RC:
|
||||
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
|
||||
if (!qp) {
|
||||
pr_debug("%s: Unable to allocate QP\n", __FUNCTION__);
|
||||
pr_debug("%s: Unable to allocate QP\n", __func__);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
spin_lock_init(&qp->lock);
|
||||
@ -266,7 +269,7 @@ static struct ib_qp *c2_create_qp(struct ib_pd *pd,
|
||||
|
||||
break;
|
||||
default:
|
||||
pr_debug("%s: Invalid QP type: %d\n", __FUNCTION__,
|
||||
pr_debug("%s: Invalid QP type: %d\n", __func__,
|
||||
init_attr->qp_type);
|
||||
return ERR_PTR(-EINVAL);
|
||||
break;
|
||||
@ -285,7 +288,7 @@ static int c2_destroy_qp(struct ib_qp *ib_qp)
|
||||
struct c2_qp *qp = to_c2qp(ib_qp);
|
||||
|
||||
pr_debug("%s:%u qp=%p,qp->state=%d\n",
|
||||
__FUNCTION__, __LINE__,ib_qp,qp->state);
|
||||
__func__, __LINE__, ib_qp, qp->state);
|
||||
c2_free_qp(to_c2dev(ib_qp->device), qp);
|
||||
kfree(qp);
|
||||
return 0;
|
||||
@ -300,13 +303,13 @@ static struct ib_cq *c2_create_cq(struct ib_device *ibdev, int entries, int vect
|
||||
|
||||
cq = kmalloc(sizeof(*cq), GFP_KERNEL);
|
||||
if (!cq) {
|
||||
pr_debug("%s: Unable to allocate CQ\n", __FUNCTION__);
|
||||
pr_debug("%s: Unable to allocate CQ\n", __func__);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
|
||||
if (err) {
|
||||
pr_debug("%s: error initializing CQ\n", __FUNCTION__);
|
||||
pr_debug("%s: error initializing CQ\n", __func__);
|
||||
kfree(cq);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
@ -318,7 +321,7 @@ static int c2_destroy_cq(struct ib_cq *ib_cq)
|
||||
{
|
||||
struct c2_cq *cq = to_c2cq(ib_cq);
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
c2_free_cq(to_c2dev(ib_cq->device), cq);
|
||||
kfree(cq);
|
||||
@ -400,7 +403,7 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
|
||||
mr->umem = NULL;
|
||||
pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
|
||||
"*iova_start %llx, first pa %llx, last pa %llx\n",
|
||||
__FUNCTION__, page_shift, pbl_depth, total_len,
|
||||
__func__, page_shift, pbl_depth, total_len,
|
||||
(unsigned long long) *iova_start,
|
||||
(unsigned long long) page_list[0],
|
||||
(unsigned long long) page_list[pbl_depth-1]);
|
||||
@ -422,7 +425,7 @@ static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
|
||||
struct ib_phys_buf bl;
|
||||
u64 kva = 0;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
/* AMSO1100 limit */
|
||||
bl.size = 0xffffffff;
|
||||
@ -442,7 +445,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
struct c2_pd *c2pd = to_c2pd(pd);
|
||||
struct c2_mr *c2mr;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
|
||||
if (!c2mr)
|
||||
@ -506,7 +509,7 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
|
||||
struct c2_mr *mr = to_c2mr(ib_mr);
|
||||
int err;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
|
||||
if (err)
|
||||
@ -523,14 +526,14 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
|
||||
static ssize_t show_rev(struct class_device *cdev, char *buf)
|
||||
{
|
||||
struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return sprintf(buf, "%x\n", dev->props.hw_ver);
|
||||
}
|
||||
|
||||
static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
|
||||
{
|
||||
struct c2_dev *dev = container_of(cdev, struct c2_dev, ibdev.class_dev);
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return sprintf(buf, "%x.%x.%x\n",
|
||||
(int) (dev->props.fw_ver >> 32),
|
||||
(int) (dev->props.fw_ver >> 16) & 0xffff,
|
||||
@ -539,13 +542,13 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
|
||||
|
||||
static ssize_t show_hca(struct class_device *cdev, char *buf)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return sprintf(buf, "AMSO1100\n");
|
||||
}
|
||||
|
||||
static ssize_t show_board(struct class_device *cdev, char *buf)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
|
||||
}
|
||||
|
||||
@ -575,13 +578,13 @@ static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
|
||||
static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
@ -592,13 +595,13 @@ static int c2_process_mad(struct ib_device *ibdev,
|
||||
struct ib_grh *in_grh,
|
||||
struct ib_mad *in_mad, struct ib_mad *out_mad)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
/* Request a connection */
|
||||
return c2_llp_connect(cm_id, iw_param);
|
||||
@ -606,7 +609,7 @@ static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
|
||||
|
||||
static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
/* Accept the new connection */
|
||||
return c2_llp_accept(cm_id, iw_param);
|
||||
@ -616,7 +619,7 @@ static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
|
||||
{
|
||||
int err;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
err = c2_llp_reject(cm_id, pdata, pdata_len);
|
||||
return err;
|
||||
@ -626,10 +629,10 @@ static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
|
||||
{
|
||||
int err;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
err = c2_llp_service_create(cm_id, backlog);
|
||||
pr_debug("%s:%u err=%d\n",
|
||||
__FUNCTION__, __LINE__,
|
||||
__func__, __LINE__,
|
||||
err);
|
||||
return err;
|
||||
}
|
||||
@ -637,7 +640,7 @@ static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
|
||||
static int c2_service_destroy(struct iw_cm_id *cm_id)
|
||||
{
|
||||
int err;
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
|
||||
err = c2_llp_service_destroy(cm_id);
|
||||
|
||||
@ -743,7 +746,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
|
||||
netdev = alloc_netdev(sizeof(*netdev), name, setup);
|
||||
if (!netdev) {
|
||||
printk(KERN_ERR PFX "%s - etherdev alloc failed",
|
||||
__FUNCTION__);
|
||||
__func__);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -780,7 +783,7 @@ int c2_register_device(struct c2_dev *dev)
|
||||
if (ret)
|
||||
goto out2;
|
||||
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
|
||||
dev->ibdev.owner = THIS_MODULE;
|
||||
dev->ibdev.uverbs_cmd_mask =
|
||||
@ -873,13 +876,13 @@ out1:
|
||||
out2:
|
||||
free_netdev(dev->pseudo_netdev);
|
||||
out3:
|
||||
pr_debug("%s:%u ret=%d\n", __FUNCTION__, __LINE__, ret);
|
||||
pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void c2_unregister_device(struct c2_dev *dev)
|
||||
{
|
||||
pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
|
||||
pr_debug("%s:%u\n", __func__, __LINE__);
|
||||
unregister_netdev(dev->pseudo_netdev);
|
||||
free_netdev(dev->pseudo_netdev);
|
||||
ib_unregister_device(&dev->ibdev);
|
||||
|
@ -121,7 +121,7 @@ void c2_set_qp_state(struct c2_qp *qp, int c2_state)
|
||||
int new_state = to_ib_state(c2_state);
|
||||
|
||||
pr_debug("%s: qp[%p] state modify %s --> %s\n",
|
||||
__FUNCTION__,
|
||||
__func__,
|
||||
qp,
|
||||
to_ib_state_str(qp->state),
|
||||
to_ib_state_str(new_state));
|
||||
@ -141,7 +141,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
|
||||
int err;
|
||||
|
||||
pr_debug("%s:%d qp=%p, %s --> %s\n",
|
||||
__FUNCTION__, __LINE__,
|
||||
__func__, __LINE__,
|
||||
qp,
|
||||
to_ib_state_str(qp->state),
|
||||
to_ib_state_str(attr->qp_state));
|
||||
@ -224,7 +224,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
|
||||
qp->state = next_state;
|
||||
#ifdef DEBUG
|
||||
else
|
||||
pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err);
|
||||
pr_debug("%s: c2_errno=%d\n", __func__, err);
|
||||
#endif
|
||||
/*
|
||||
* If we're going to error and generating the event here, then
|
||||
@ -243,7 +243,7 @@ int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
|
||||
vq_req_free(c2dev, vq_req);
|
||||
|
||||
pr_debug("%s:%d qp=%p, cur_state=%s\n",
|
||||
__FUNCTION__, __LINE__,
|
||||
__func__, __LINE__,
|
||||
qp,
|
||||
to_ib_state_str(qp->state));
|
||||
return err;
|
||||
@ -811,16 +811,24 @@ int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
|
||||
|
||||
switch (ib_wr->opcode) {
|
||||
case IB_WR_SEND:
|
||||
if (ib_wr->send_flags & IB_SEND_SOLICITED) {
|
||||
c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
|
||||
msg_size = sizeof(struct c2wr_send_req);
|
||||
case IB_WR_SEND_WITH_INV:
|
||||
if (ib_wr->opcode == IB_WR_SEND) {
|
||||
if (ib_wr->send_flags & IB_SEND_SOLICITED)
|
||||
c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
|
||||
else
|
||||
c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
|
||||
wr.sqwr.send.remote_stag = 0;
|
||||
} else {
|
||||
c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
|
||||
msg_size = sizeof(struct c2wr_send_req);
|
||||
if (ib_wr->send_flags & IB_SEND_SOLICITED)
|
||||
c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
|
||||
else
|
||||
c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
|
||||
wr.sqwr.send.remote_stag =
|
||||
cpu_to_be32(ib_wr->ex.invalidate_rkey);
|
||||
}
|
||||
|
||||
wr.sqwr.send.remote_stag = 0;
|
||||
msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
|
||||
msg_size = sizeof(struct c2wr_send_req) +
|
||||
sizeof(struct c2_data_addr) * ib_wr->num_sge;
|
||||
if (ib_wr->num_sge > qp->send_sgl_depth) {
|
||||
err = -EINVAL;
|
||||
break;
|
||||
|
@ -208,7 +208,7 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
|
||||
/*
|
||||
* Add an IP address to the RNIC interface
|
||||
*/
|
||||
int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
|
||||
int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
|
||||
{
|
||||
struct c2_vq_req *vq_req;
|
||||
struct c2wr_rnic_setconfig_req *wr;
|
||||
@ -270,7 +270,7 @@ int c2_add_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
|
||||
/*
|
||||
* Delete an IP address from the RNIC interface
|
||||
*/
|
||||
int c2_del_addr(struct c2_dev *c2dev, u32 inaddr, u32 inmask)
|
||||
int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
|
||||
{
|
||||
struct c2_vq_req *vq_req;
|
||||
struct c2wr_rnic_setconfig_req *wr;
|
||||
@ -455,7 +455,8 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
|
||||
IB_DEVICE_CURR_QP_STATE_MOD |
|
||||
IB_DEVICE_SYS_IMAGE_GUID |
|
||||
IB_DEVICE_ZERO_STAG |
|
||||
IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
|
||||
IB_DEVICE_MEM_WINDOW |
|
||||
IB_DEVICE_SEND_W_INV);
|
||||
|
||||
/* Allocate the qptr_array */
|
||||
c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
|
||||
@ -506,17 +507,17 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
|
||||
mmio_regs = c2dev->kva;
|
||||
/* Initialize the Verbs Request Queue */
|
||||
c2_mq_req_init(&c2dev->req_vq, 0,
|
||||
be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_QSIZE)),
|
||||
be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
|
||||
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
|
||||
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
|
||||
mmio_regs +
|
||||
be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
|
||||
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
|
||||
mmio_regs +
|
||||
be32_to_cpu(readl(mmio_regs + C2_REGS_Q0_SHARED)),
|
||||
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
|
||||
C2_MQ_ADAPTER_TARGET);
|
||||
|
||||
/* Initialize the Verbs Reply Queue */
|
||||
qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
|
||||
msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
|
||||
qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
|
||||
msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
|
||||
q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
|
||||
&c2dev->rep_vq.host_dma, GFP_KERNEL);
|
||||
if (!q1_pages) {
|
||||
@ -524,7 +525,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
|
||||
goto bail1;
|
||||
}
|
||||
pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
|
||||
pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
|
||||
pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
|
||||
(unsigned long long) c2dev->rep_vq.host_dma);
|
||||
c2_mq_rep_init(&c2dev->rep_vq,
|
||||
1,
|
||||
@ -532,12 +533,12 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
|
||||
msgsize,
|
||||
q1_pages,
|
||||
mmio_regs +
|
||||
be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_SHARED)),
|
||||
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
|
||||
C2_MQ_HOST_TARGET);
|
||||
|
||||
/* Initialize the Asynchronus Event Queue */
|
||||
qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
|
||||
msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
|
||||
qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
|
||||
msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
|
||||
q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
|
||||
&c2dev->aeq.host_dma, GFP_KERNEL);
|
||||
if (!q2_pages) {
|
||||
@ -545,7 +546,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
|
||||
goto bail2;
|
||||
}
|
||||
pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
|
||||
pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages,
|
||||
pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
|
||||
(unsigned long long) c2dev->aeq.host_dma);
|
||||
c2_mq_rep_init(&c2dev->aeq,
|
||||
2,
|
||||
@ -553,7 +554,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
|
||||
msgsize,
|
||||
q2_pages,
|
||||
mmio_regs +
|
||||
be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_SHARED)),
|
||||
be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
|
||||
C2_MQ_HOST_TARGET);
|
||||
|
||||
/* Initialize the verbs request allocator */
|
||||
|
@ -197,7 +197,7 @@ int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
|
||||
*/
|
||||
while (msg == NULL) {
|
||||
pr_debug("%s:%d no available msg in VQ, waiting...\n",
|
||||
__FUNCTION__, __LINE__);
|
||||
__func__, __LINE__);
|
||||
init_waitqueue_entry(&__wait, current);
|
||||
add_wait_queue(&c2dev->req_vq_wo, &__wait);
|
||||
spin_unlock(&c2dev->vqlock);
|
||||
|
@ -180,8 +180,8 @@ enum c2_wr_type {
|
||||
};
|
||||
|
||||
struct c2_netaddr {
|
||||
u32 ip_addr;
|
||||
u32 netmask;
|
||||
__be32 ip_addr;
|
||||
__be32 netmask;
|
||||
u32 mtu;
|
||||
};
|
||||
|
||||
@ -199,9 +199,9 @@ struct c2_route {
|
||||
* A Scatter Gather Entry.
|
||||
*/
|
||||
struct c2_data_addr {
|
||||
u32 stag;
|
||||
u32 length;
|
||||
u64 to;
|
||||
__be32 stag;
|
||||
__be32 length;
|
||||
__be64 to;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -274,7 +274,7 @@ struct c2wr_hdr {
|
||||
* from the host to adapter by libccil, but we copy it anyway
|
||||
* to make the memcpy to the adapter better aligned.
|
||||
*/
|
||||
u32 wqe_count;
|
||||
__be32 wqe_count;
|
||||
|
||||
/* Put these fields next so that later 32- and 64-bit
|
||||
* quantities are naturally aligned.
|
||||
@ -316,8 +316,8 @@ enum c2_rnic_flags {
|
||||
struct c2wr_rnic_open_req {
|
||||
struct c2wr_hdr hdr;
|
||||
u64 user_context;
|
||||
u16 flags; /* See enum c2_rnic_flags */
|
||||
u16 port_num;
|
||||
__be16 flags; /* See enum c2_rnic_flags */
|
||||
__be16 port_num;
|
||||
} __attribute__((packed));
|
||||
|
||||
struct c2wr_rnic_open_rep {
|
||||
@ -341,30 +341,30 @@ struct c2wr_rnic_query_req {
|
||||
struct c2wr_rnic_query_rep {
struct c2wr_hdr hdr;
u64 user_context;
u32 vendor_id;
u32 part_number;
u32 hw_version;
u32 fw_ver_major;
u32 fw_ver_minor;
u32 fw_ver_patch;
__be32 vendor_id;
__be32 part_number;
__be32 hw_version;
__be32 fw_ver_major;
__be32 fw_ver_minor;
__be32 fw_ver_patch;
char fw_ver_build_str[WR_BUILD_STR_LEN];
u32 max_qps;
u32 max_qp_depth;
__be32 max_qps;
__be32 max_qp_depth;
u32 max_srq_depth;
u32 max_send_sgl_depth;
u32 max_rdma_sgl_depth;
u32 max_cqs;
u32 max_cq_depth;
__be32 max_cqs;
__be32 max_cq_depth;
u32 max_cq_event_handlers;
u32 max_mrs;
__be32 max_mrs;
u32 max_pbl_depth;
u32 max_pds;
u32 max_global_ird;
__be32 max_pds;
__be32 max_global_ird;
u32 max_global_ord;
u32 max_qp_ird;
u32 max_qp_ord;
__be32 max_qp_ird;
__be32 max_qp_ord;
u32 flags;
u32 max_mws;
__be32 max_mws;
u32 pbe_range_low;
u32 pbe_range_high;
u32 max_srqs;

@ -405,7 +405,7 @@ union c2wr_rnic_getconfig {
struct c2wr_rnic_setconfig_req {
struct c2wr_hdr hdr;
u32 rnic_handle;
u32 option; /* See c2_setconfig_cmd_t */
__be32 option; /* See c2_setconfig_cmd_t */
/* variable data and pad. See c2_netaddr and c2_route */
u8 data[0];
} __attribute__((packed)) ;

@ -441,18 +441,18 @@ union c2wr_rnic_close {
*/
struct c2wr_cq_create_req {
struct c2wr_hdr hdr;
u64 shared_ht;
__be64 shared_ht;
u64 user_context;
u64 msg_pool;
__be64 msg_pool;
u32 rnic_handle;
u32 msg_size;
u32 depth;
__be32 msg_size;
__be32 depth;
} __attribute__((packed)) ;

struct c2wr_cq_create_rep {
struct c2wr_hdr hdr;
u32 mq_index;
u32 adapter_shared;
__be32 mq_index;
__be32 adapter_shared;
u32 cq_handle;
} __attribute__((packed)) ;

@ -585,40 +585,40 @@ enum c2wr_qp_flags {

struct c2wr_qp_create_req {
struct c2wr_hdr hdr;
u64 shared_sq_ht;
u64 shared_rq_ht;
__be64 shared_sq_ht;
__be64 shared_rq_ht;
u64 user_context;
u32 rnic_handle;
u32 sq_cq_handle;
u32 rq_cq_handle;
u32 sq_depth;
u32 rq_depth;
__be32 sq_depth;
__be32 rq_depth;
u32 srq_handle;
u32 srq_limit;
u32 flags; /* see enum c2wr_qp_flags */
u32 send_sgl_depth;
u32 recv_sgl_depth;
u32 rdma_write_sgl_depth;
u32 ord;
u32 ird;
__be32 flags; /* see enum c2wr_qp_flags */
__be32 send_sgl_depth;
__be32 recv_sgl_depth;
__be32 rdma_write_sgl_depth;
__be32 ord;
__be32 ird;
u32 pd_id;
} __attribute__((packed)) ;

struct c2wr_qp_create_rep {
struct c2wr_hdr hdr;
u32 sq_depth;
u32 rq_depth;
__be32 sq_depth;
__be32 rq_depth;
u32 send_sgl_depth;
u32 recv_sgl_depth;
u32 rdma_write_sgl_depth;
u32 ord;
u32 ird;
u32 sq_msg_size;
u32 sq_mq_index;
u32 sq_mq_start;
u32 rq_msg_size;
u32 rq_mq_index;
u32 rq_mq_start;
__be32 sq_msg_size;
__be32 sq_mq_index;
__be32 sq_mq_start;
__be32 rq_msg_size;
__be32 rq_mq_index;
__be32 rq_mq_start;
u32 qp_handle;
} __attribute__((packed)) ;

@ -667,11 +667,11 @@ struct c2wr_qp_modify_req {
u32 stream_msg_length;
u32 rnic_handle;
u32 qp_handle;
u32 next_qp_state;
u32 ord;
u32 ird;
u32 sq_depth;
u32 rq_depth;
__be32 next_qp_state;
__be32 ord;
__be32 ird;
__be32 sq_depth;
__be32 rq_depth;
u32 llp_ep_handle;
} __attribute__((packed)) ;

@ -721,10 +721,10 @@ struct c2wr_qp_connect_req {
struct c2wr_hdr hdr;
u32 rnic_handle;
u32 qp_handle;
u32 remote_addr;
u16 remote_port;
__be32 remote_addr;
__be16 remote_port;
u16 pad;
u32 private_data_length;
__be32 private_data_length;
u8 private_data[0]; /* Private data in-line. */
} __attribute__((packed)) ;

@ -759,25 +759,25 @@ union c2wr_nsmr_stag_alloc {

struct c2wr_nsmr_register_req {
struct c2wr_hdr hdr;
u64 va;
__be64 va;
u32 rnic_handle;
u16 flags;
__be16 flags;
u8 stag_key;
u8 pad;
u32 pd_id;
u32 pbl_depth;
u32 pbe_size;
u32 fbo;
u32 length;
u32 addrs_length;
__be32 pbl_depth;
__be32 pbe_size;
__be32 fbo;
__be32 length;
__be32 addrs_length;
/* array of paddrs (must be aligned on a 64bit boundary) */
u64 paddrs[0];
__be64 paddrs[0];
} __attribute__((packed)) ;

struct c2wr_nsmr_register_rep {
struct c2wr_hdr hdr;
u32 pbl_depth;
u32 stag_index;
__be32 stag_index;
} __attribute__((packed)) ;

union c2wr_nsmr_register {
@ -788,11 +788,11 @@ union c2wr_nsmr_register {
struct c2wr_nsmr_pbl_req {
struct c2wr_hdr hdr;
u32 rnic_handle;
u32 flags;
u32 stag_index;
u32 addrs_length;
__be32 flags;
__be32 stag_index;
__be32 addrs_length;
/* array of paddrs (must be aligned on a 64bit boundary) */
u64 paddrs[0];
__be64 paddrs[0];
} __attribute__((packed)) ;

struct c2wr_nsmr_pbl_rep {
@ -847,7 +847,7 @@ union c2wr_mw_query {
struct c2wr_stag_dealloc_req {
struct c2wr_hdr hdr;
u32 rnic_handle;
u32 stag_index;
__be32 stag_index;
} __attribute__((packed)) ;

struct c2wr_stag_dealloc_rep {
@ -949,7 +949,7 @@ struct c2wr_ce {
u64 qp_user_context; /* c2_user_qp_t * */
u32 qp_state; /* Current QP State */
u32 handle; /* QPID or EP Handle */
u32 bytes_rcvd; /* valid for RECV WCs */
__be32 bytes_rcvd; /* valid for RECV WCs */
u32 stag;
} __attribute__((packed)) ;

@ -984,8 +984,8 @@ struct c2_rq_hdr {
*/
struct c2wr_send_req {
struct c2_sq_hdr sq_hdr;
u32 sge_len;
u32 remote_stag;
__be32 sge_len;
__be32 remote_stag;
u8 data[0]; /* SGE array */
} __attribute__((packed));

@ -996,9 +996,9 @@ union c2wr_send {

struct c2wr_rdma_write_req {
struct c2_sq_hdr sq_hdr;
u64 remote_to;
u32 remote_stag;
u32 sge_len;
__be64 remote_to;
__be32 remote_stag;
__be32 sge_len;
u8 data[0]; /* SGE array */
} __attribute__((packed));

@ -1009,11 +1009,11 @@ union c2wr_rdma_write {

struct c2wr_rdma_read_req {
struct c2_sq_hdr sq_hdr;
u64 local_to;
u64 remote_to;
u32 local_stag;
u32 remote_stag;
u32 length;
__be64 local_to;
__be64 remote_to;
__be32 local_stag;
__be32 remote_stag;
__be32 length;
} __attribute__((packed));

union c2wr_rdma_read {
@ -1113,9 +1113,9 @@ union c2wr_recv {
struct c2wr_ae_hdr {
struct c2wr_hdr hdr;
u64 user_context; /* user context for this res. */
u32 resource_type; /* see enum c2_resource_indicator */
u32 resource; /* handle for resource */
u32 qp_state; /* current QP State */
__be32 resource_type; /* see enum c2_resource_indicator */
__be32 resource; /* handle for resource */
__be32 qp_state; /* current QP State */
} __attribute__((packed));

/*
@ -1124,11 +1124,11 @@ struct c2wr_ae_hdr {
*/
struct c2wr_ae_active_connect_results {
struct c2wr_ae_hdr ae_hdr;
u32 laddr;
u32 raddr;
u16 lport;
u16 rport;
u32 private_data_length;
__be32 laddr;
__be32 raddr;
__be16 lport;
__be16 rport;
__be32 private_data_length;
u8 private_data[0]; /* data is in-line in the msg. */
} __attribute__((packed));

@ -1142,11 +1142,11 @@ struct c2wr_ae_active_connect_results {
struct c2wr_ae_connection_request {
struct c2wr_ae_hdr ae_hdr;
u32 cr_handle; /* connreq handle (sock ptr) */
u32 laddr;
u32 raddr;
u16 lport;
u16 rport;
u32 private_data_length;
__be32 laddr;
__be32 raddr;
__be16 lport;
__be16 rport;
__be32 private_data_length;
u8 private_data[0]; /* data is in-line in the msg. */
} __attribute__((packed));

@ -1158,12 +1158,12 @@ union c2wr_ae {

struct c2wr_init_req {
struct c2wr_hdr hdr;
u64 hint_count;
u64 q0_host_shared;
u64 q1_host_shared;
u64 q1_host_msg_pool;
u64 q2_host_shared;
u64 q2_host_msg_pool;
__be64 hint_count;
__be64 q0_host_shared;
__be64 q1_host_shared;
__be64 q1_host_msg_pool;
__be64 q2_host_shared;
__be64 q2_host_msg_pool;
} __attribute__((packed));

struct c2wr_init_rep {
@ -1276,10 +1276,10 @@ struct c2wr_ep_listen_create_req {
struct c2wr_hdr hdr;
u64 user_context; /* returned in AEs. */
u32 rnic_handle;
u32 local_addr; /* local addr, or 0 */
u16 local_port; /* 0 means "pick one" */
__be32 local_addr; /* local addr, or 0 */
__be16 local_port; /* 0 means "pick one" */
u16 pad;
u32 backlog; /* tradional tcp listen bl */
__be32 backlog; /* tradional tcp listen bl */
} __attribute__((packed));

struct c2wr_ep_listen_create_rep {
@ -1340,7 +1340,7 @@ struct c2wr_cr_accept_req {
u32 rnic_handle;
u32 qp_handle; /* QP to bind to this LLP conn */
u32 ep_handle; /* LLP handle to accept */
u32 private_data_length;
__be32 private_data_length;
u8 private_data[0]; /* data in-line in msg. */
} __attribute__((packed));

@ -1508,7 +1508,7 @@ static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
{
((struct c2wr_hdr *) wr)->sge_count = sge_count;
}
static __inline__ u32 c2_wr_get_wqe_count(void *wr)
static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
{
return ((struct c2wr_hdr *) wr)->wqe_count;
}

|
||||
|
||||
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
|
||||
if (!m) {
|
||||
PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
|
||||
PDBG("%s couldn't allocate memory.\n", __func__);
|
||||
return;
|
||||
}
|
||||
m->mem_id = MEM_PMRX;
|
||||
m->addr = (stag>>8) * 32 + rdev->rnic_info.tpt_base;
|
||||
m->len = size;
|
||||
PDBG("%s TPT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
|
||||
PDBG("%s TPT addr 0x%x len %d\n", __func__, m->addr, m->len);
|
||||
rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
|
||||
if (rc) {
|
||||
PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
|
||||
PDBG("%s toectl returned error %d\n", __func__, rc);
|
||||
kfree(m);
|
||||
return;
|
||||
}
|
||||
@ -82,17 +82,17 @@ void cxio_dump_pbl(struct cxio_rdev *rdev, u32 pbl_addr, uint len, u8 shift)
|
||||
|
||||
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
|
||||
if (!m) {
|
||||
PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
|
||||
PDBG("%s couldn't allocate memory.\n", __func__);
|
||||
return;
|
||||
}
|
||||
m->mem_id = MEM_PMRX;
|
||||
m->addr = pbl_addr;
|
||||
m->len = size;
|
||||
PDBG("%s PBL addr 0x%x len %d depth %d\n",
|
||||
__FUNCTION__, m->addr, m->len, npages);
|
||||
__func__, m->addr, m->len, npages);
|
||||
rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
|
||||
if (rc) {
|
||||
PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
|
||||
PDBG("%s toectl returned error %d\n", __func__, rc);
|
||||
kfree(m);
|
||||
return;
|
||||
}
|
||||
@ -144,16 +144,16 @@ void cxio_dump_rqt(struct cxio_rdev *rdev, u32 hwtid, int nents)
|
||||
|
||||
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
|
||||
if (!m) {
|
||||
PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
|
||||
PDBG("%s couldn't allocate memory.\n", __func__);
|
||||
return;
|
||||
}
|
||||
m->mem_id = MEM_PMRX;
|
||||
m->addr = ((hwtid)<<10) + rdev->rnic_info.rqt_base;
|
||||
m->len = size;
|
||||
PDBG("%s RQT addr 0x%x len %d\n", __FUNCTION__, m->addr, m->len);
|
||||
PDBG("%s RQT addr 0x%x len %d\n", __func__, m->addr, m->len);
|
||||
rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
|
||||
if (rc) {
|
||||
PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
|
||||
PDBG("%s toectl returned error %d\n", __func__, rc);
|
||||
kfree(m);
|
||||
return;
|
||||
}
|
||||
@ -177,16 +177,16 @@ void cxio_dump_tcb(struct cxio_rdev *rdev, u32 hwtid)
|
||||
|
||||
m = kmalloc(sizeof(*m) + size, GFP_ATOMIC);
|
||||
if (!m) {
|
||||
PDBG("%s couldn't allocate memory.\n", __FUNCTION__);
|
||||
PDBG("%s couldn't allocate memory.\n", __func__);
|
||||
return;
|
||||
}
|
||||
m->mem_id = MEM_CM;
|
||||
m->addr = hwtid * size;
|
||||
m->len = size;
|
||||
PDBG("%s TCB %d len %d\n", __FUNCTION__, m->addr, m->len);
|
||||
PDBG("%s TCB %d len %d\n", __func__, m->addr, m->len);
|
||||
rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
|
||||
if (rc) {
|
||||
PDBG("%s toectl returned error %d\n", __FUNCTION__, rc);
|
||||
PDBG("%s toectl returned error %d\n", __func__, rc);
|
||||
kfree(m);
|
||||
return;
|
||||
}
|
||||
|
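The remaining hunks in this merge are a mechanical rename of the GCC-specific __FUNCTION__ identifier to the C99 predefined identifier __func__; both expand to the name of the enclosing function, so the PDBG output does not change. A small stand-alone illustration (the DBG macro here is only an example, not the kernel's PDBG):

```c
#include <stdio.h>

/* Illustrative debug macro; the kernel's PDBG ultimately calls pr_debug(). */
#define DBG(fmt, ...) printf("%s: " fmt, __func__, ##__VA_ARGS__)

static void open_device(int id)
{
	/* __func__ is a predefined identifier (C99), not a macro,
	 * so it works with any conforming compiler. */
	DBG("opening device %d\n", id);
}

int main(void)
{
	open_device(3);	/* prints "open_device: opening device 3" */
	return 0;
}
```
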
@ -140,7 +140,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
struct t3_modify_qp_wr *wqe;
struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
if (!skb) {
PDBG("%s alloc_skb failed\n", __FUNCTION__);
PDBG("%s alloc_skb failed\n", __func__);
return -ENOMEM;
}
wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
@ -225,7 +225,7 @@ static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
}
out:
mutex_unlock(&uctx->lock);
PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
PDBG("%s qpid 0x%x\n", __func__, qpid);
return qpid;
}

@ -237,7 +237,7 @@ static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry)
return;
PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
PDBG("%s qpid 0x%x\n", __func__, qpid);
entry->qpid = qpid;
mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids);
@ -300,7 +300,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
if (!kernel_domain)
wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
(wq->qpid << rdev_p->qpshift);
PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __FUNCTION__,
PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
return 0;
err4:
@ -345,7 +345,7 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
{
struct t3_cqe cqe;

PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
wq, cq, cq->sw_rptr, cq->sw_wptr);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@ -363,10 +363,10 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
u32 ptr;

PDBG("%s wq %p cq %p\n", __FUNCTION__, wq, cq);
PDBG("%s wq %p cq %p\n", __func__, wq, cq);

/* flush RQ */
PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __FUNCTION__,
PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
wq->rq_rptr, wq->rq_wptr, count);
ptr = wq->rq_rptr + count;
while (ptr++ != wq->rq_wptr)
@ -378,7 +378,7 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
{
struct t3_cqe cqe;

PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __FUNCTION__,
PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
wq, cq, cq->sw_rptr, cq->sw_wptr);
memset(&cqe, 0, sizeof(cqe));
cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
@ -415,11 +415,11 @@ void cxio_flush_hw_cq(struct t3_cq *cq)
{
struct t3_cqe *cqe, *swcqe;

PDBG("%s cq %p cqid 0x%x\n", __FUNCTION__, cq, cq->cqid);
PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
cqe = cxio_next_hw_cqe(cq);
while (cqe) {
PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
__FUNCTION__, cq->rptr, cq->sw_wptr);
__func__, cq->rptr, cq->sw_wptr);
swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
*swcqe = *cqe;
swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
@ -461,7 +461,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
(*count)++;
ptr++;
}
PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
@ -470,7 +470,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
u32 ptr;

*count = 0;
PDBG("%s count zero %d\n", __FUNCTION__, *count);
PDBG("%s count zero %d\n", __func__, *count);
ptr = cq->sw_rptr;
while (!Q_EMPTY(ptr, cq->sw_wptr)) {
cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
@ -479,7 +479,7 @@ void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
(*count)++;
ptr++;
}
PDBG("%s cq %p count %d\n", __FUNCTION__, cq, *count);
PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
@ -506,12 +506,12 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)

skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
if (!skb) {
PDBG("%s alloc_skb failed\n", __FUNCTION__);
PDBG("%s alloc_skb failed\n", __func__);
return -ENOMEM;
}
err = cxio_hal_init_ctrl_cq(rdev_p);
if (err) {
PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
goto err;
}
rdev_p->ctrl_qp.workq = dma_alloc_coherent(
@ -521,7 +521,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
&(rdev_p->ctrl_qp.dma_addr),
GFP_KERNEL);
if (!rdev_p->ctrl_qp.workq) {
PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
PDBG("%s dma_alloc_coherent failed\n", __func__);
err = -ENOMEM;
goto err;
}
@ -591,25 +591,25 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
addr &= 0x7FFFFFF;
nr_wqe = len % 96 ? len / 96 + 1 : len / 96; /* 96B max per WQE */
PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
__FUNCTION__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
__func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
nr_wqe, data, addr);
utx_len = 3; /* in 32B unit */
for (i = 0; i < nr_wqe; i++) {
if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
T3_CTRL_QP_SIZE_LOG2)) {
PDBG("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, "
"wait for more space i %d\n", __FUNCTION__,
"wait for more space i %d\n", __func__,
rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
!Q_FULL(rdev_p->ctrl_qp.rptr,
rdev_p->ctrl_qp.wptr,
T3_CTRL_QP_SIZE_LOG2))) {
PDBG("%s ctrl_qp workq interrupted\n",
__FUNCTION__);
__func__);
return -ERESTARTSYS;
}
PDBG("%s ctrl_qp wakeup, continue posting work request "
"i %d\n", __FUNCTION__, i);
"i %d\n", __func__, i);
}
wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
(1 << T3_CTRL_QP_SIZE_LOG2)));
@ -630,7 +630,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
if ((i != 0) &&
(i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
flag = T3_COMPLETION_FLAG;
PDBG("%s force completion at i %d\n", __FUNCTION__, i);
PDBG("%s force completion at i %d\n", __func__, i);
}

/* build the utx mem command */
@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
*stag = (stag_idx << 8) | ((*stag) & 0xFF);
}
PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
__FUNCTION__, stag_state, type, pdid, stag_idx);
__func__, stag_state, type, pdid, stag_idx);

if (reset_tpt_entry)
cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
@ -718,7 +718,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
if (pbl) {

PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
__FUNCTION__, *pbl_addr, rdev_p->rnic_info.pbl_base,
__func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
*pbl_size);
err = cxio_hal_ctrl_qp_write_mem(rdev_p,
(*pbl_addr >> 5),
@ -814,7 +814,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
PDBG("%s rdev_p %p\n", __FUNCTION__, rdev_p);
PDBG("%s rdev_p %p\n", __func__, rdev_p);
wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
@ -856,7 +856,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
" se %0x notify %0x cqbranch %0x creditth %0x\n",
cnt, __FUNCTION__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
RSPQ_CREDIT_THRESH(rsp_msg));
@ -868,7 +868,7 @@ static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
if (!rdev_p) {
PDBG("%s called by t3cdev %p with null ulp\n", __FUNCTION__,
PDBG("%s called by t3cdev %p with null ulp\n", __func__,
t3cdev_p);
return 0;
}
@ -908,13 +908,13 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
T3_MAX_DEV_NAME_LEN);
} else {
PDBG("%s t3cdev_p or dev_name must be set\n", __FUNCTION__);
PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
return -EINVAL;
}

list_add_tail(&rdev_p->entry, &rdev_list);

PDBG("%s opening rnic dev %s\n", __FUNCTION__, rdev_p->dev_name);
PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
if (!rdev_p->t3cdev_p)
rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
@ -923,14 +923,14 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
&(rdev_p->rnic_info));
if (err) {
printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
__FUNCTION__, rdev_p->t3cdev_p, err);
__func__, rdev_p->t3cdev_p, err);
goto err1;
}
err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
&(rdev_p->port_info));
if (err) {
printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
__FUNCTION__, rdev_p->t3cdev_p, err);
__func__, rdev_p->t3cdev_p, err);
goto err1;
}

@ -947,7 +947,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
"pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
__FUNCTION__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
__func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
rdev_p->rnic_info.pbl_base,
rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
@ -961,7 +961,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
err = cxio_hal_init_ctrl_qp(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
__FUNCTION__, err);
__func__, err);
goto err1;
}
err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
@ -969,19 +969,19 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
T3_MAX_NUM_PD);
if (err) {
printk(KERN_ERR "%s error %d initializing hal resources.\n",
__FUNCTION__, err);
__func__, err);
goto err2;
}
err = cxio_hal_pblpool_create(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
__FUNCTION__, err);
__func__, err);
goto err3;
}
err = cxio_hal_rqtpool_create(rdev_p);
if (err) {
printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
__FUNCTION__, err);
__func__, err);
goto err4;
}
return 0;
@ -1043,7 +1043,7 @@ static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
* Insert this completed cqe into the swcq.
*/
PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
__FUNCTION__, Q_PTR2IDX(ptr, wq->sq_size_log2),
__func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
@ -1112,7 +1112,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,

PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
" opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
__FUNCTION__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
__func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
CQE_WRID_LOW(*hw_cqe));
@ -1215,7 +1215,7 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
struct t3_swsq *sqp;

PDBG("%s out of order completion going in swsq at idx %ld\n",
__FUNCTION__,
__func__,
Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
sqp = wq->sq +
Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
@ -1234,13 +1234,13 @@ proc_cqe:
*/
if (SQ_TYPE(*hw_cqe)) {
wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
PDBG("%s completing sq idx %ld\n", __FUNCTION__,
PDBG("%s completing sq idx %ld\n", __func__,
Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
*cookie = (wq->sq +
Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
wq->sq_rptr++;
} else {
PDBG("%s completing rq idx %ld\n", __FUNCTION__,
PDBG("%s completing rq idx %ld\n", __func__,
Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
*cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
wq->rq_rptr++;
@ -1255,11 +1255,11 @@ flush_wq:
skip_cqe:
if (SW_CQE(*hw_cqe)) {
PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
__FUNCTION__, cq, cq->cqid, cq->sw_rptr);
__func__, cq, cq->cqid, cq->sw_rptr);
++cq->sw_rptr;
} else {
PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
__FUNCTION__, cq, cq->cqid, cq->rptr);
__func__, cq, cq->cqid, cq->rptr);
++cq->rptr;

/*

@ -206,13 +206,13 @@ void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
{
u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
PDBG("%s qpid 0x%x\n", __func__, qpid);
return qpid;
}

void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
{
PDBG("%s qpid 0x%x\n", __FUNCTION__, qpid);
PDBG("%s qpid 0x%x\n", __func__, qpid);
cxio_hal_put_resource(rscp->qpid_fifo, qpid);
}

@ -255,13 +255,13 @@ void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
u32 cxio_hal_pblpool_alloc(struct cxio_rdev *rdev_p, int size)
{
unsigned long addr = gen_pool_alloc(rdev_p->pbl_pool, size);
PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
return (u32)addr;
}

void cxio_hal_pblpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size);
PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
gen_pool_free(rdev_p->pbl_pool, (unsigned long)addr, size);
}

@ -292,13 +292,13 @@ void cxio_hal_pblpool_destroy(struct cxio_rdev *rdev_p)
u32 cxio_hal_rqtpool_alloc(struct cxio_rdev *rdev_p, int size)
{
unsigned long addr = gen_pool_alloc(rdev_p->rqt_pool, size << 6);
PDBG("%s addr 0x%x size %d\n", __FUNCTION__, (u32)addr, size << 6);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
return (u32)addr;
}

void cxio_hal_rqtpool_free(struct cxio_rdev *rdev_p, u32 addr, int size)
{
PDBG("%s addr 0x%x size %d\n", __FUNCTION__, addr, size << 6);
PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
gen_pool_free(rdev_p->rqt_pool, (unsigned long)addr, size << 6);
}

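The cxio_hal_pblpool_* and cxio_hal_rqtpool_* helpers above are thin wrappers around the kernel's genalloc allocator. A rough sketch of the gen_pool calls involved, assuming a byte-addressed pool with 8-byte granules (the function names and sizes are illustrative, not the driver's):

```c
#include <linux/genalloc.h>

/* Carve a sub-allocator out of a fixed adapter memory window. */
static struct gen_pool *example_pool_create(unsigned long base, size_t len)
{
	/* min_alloc_order 3: allocations are rounded to 8-byte granules */
	struct gen_pool *pool = gen_pool_create(3, -1);

	if (!pool)
		return NULL;
	if (gen_pool_add(pool, base, len, -1)) {
		gen_pool_destroy(pool);
		return NULL;
	}
	return pool;
}

static unsigned long example_pool_alloc(struct gen_pool *pool, size_t len)
{
	return gen_pool_alloc(pool, len);	/* returns 0 on failure */
}
```
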
@ -65,7 +65,7 @@ static DEFINE_MUTEX(dev_mutex);

static void rnic_init(struct iwch_dev *rnicp)
{
PDBG("%s iwch_dev %p\n", __FUNCTION__, rnicp);
PDBG("%s iwch_dev %p\n", __func__, rnicp);
idr_init(&rnicp->cqidr);
idr_init(&rnicp->qpidr);
idr_init(&rnicp->mmidr);
@ -106,7 +106,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
struct iwch_dev *rnicp;
static int vers_printed;

PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
PDBG("%s t3cdev %p\n", __func__, tdev);
if (!vers_printed++)
printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
DRV_VERSION);
@ -144,7 +144,7 @@ static void open_rnic_dev(struct t3cdev *tdev)
static void close_rnic_dev(struct t3cdev *tdev)
{
struct iwch_dev *dev, *tmp;
PDBG("%s t3cdev %p\n", __FUNCTION__, tdev);
PDBG("%s t3cdev %p\n", __func__, tdev);
mutex_lock(&dev_mutex);
list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
if (dev->rdev.t3cdev_p == tdev) {

@ -147,7 +147,7 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
void *handle, u32 id)
{
int ret;
u32 newid;
int newid;

do {
if (!idr_pre_get(idr, GFP_KERNEL)) {

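The u32 to int change for newid matches the idr API of this era: idr_get_new() returns the allocated id through an int pointer, and the caller retries with another idr_pre_get() while it reports -EAGAIN. A simplified sketch of that allocation pattern, not the driver's exact code:

```c
#include <linux/idr.h>
#include <linux/errno.h>

/* Map 'handle' to a small integer id using the old two-step idr API. */
static int example_alloc_id(struct idr *idr, void *handle, int *out_id)
{
	int ret;

	do {
		if (!idr_pre_get(idr, GFP_KERNEL))
			return -ENOMEM;		/* preallocation failed */
		ret = idr_get_new(idr, handle, out_id);
	} while (ret == -EAGAIN);		/* raced, preallocate again */

	return ret;
}
```
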
@ -110,9 +110,9 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status);

static void start_ep_timer(struct iwch_ep *ep)
{
PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
if (timer_pending(&ep->timer)) {
PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
del_timer_sync(&ep->timer);
} else
get_ep(&ep->com);
@ -124,7 +124,7 @@ static void start_ep_timer(struct iwch_ep *ep)

static void stop_ep_timer(struct iwch_ep *ep)
{
PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
del_timer_sync(&ep->timer);
put_ep(&ep->com);
}
@ -190,7 +190,7 @@ int iwch_resume_tid(struct iwch_ep *ep)

static void set_emss(struct iwch_ep *ep, u16 opt)
{
PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
PDBG("%s ep %p opt %u\n", __func__, ep, opt);
ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
if (G_TCPOPT_TSTAMP(opt))
ep->emss -= 12;
@ -220,7 +220,7 @@ static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
unsigned long flags;

spin_lock_irqsave(&epc->lock, flags);
PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
__state_set(epc, new);
spin_unlock_irqrestore(&epc->lock, flags);
return;
@ -236,7 +236,7 @@ static void *alloc_ep(int size, gfp_t gfp)
spin_lock_init(&epc->lock);
init_waitqueue_head(&epc->waitq);
}
PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
PDBG("%s alloc ep %p\n", __func__, epc);
return epc;
}

@ -244,13 +244,13 @@ void __free_ep(struct kref *kref)
{
struct iwch_ep_common *epc;
epc = container_of(kref, struct iwch_ep_common, kref);
PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
PDBG("%s ep %p state %s\n", __func__, epc, states[state_read(epc)]);
kfree(epc);
}

static void release_ep_resources(struct iwch_ep *ep)
{
PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
dst_release(ep->dst);
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@ -349,7 +349,7 @@ static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
PDBG("%s t3cdev %p\n", __func__, dev);
kfree_skb(skb);
}

@ -370,7 +370,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
struct cpl_abort_req *req = cplhdr(skb);

PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
PDBG("%s t3cdev %p\n", __func__, dev);
req->cmd = CPL_ABORT_NO_RST;
cxgb3_ofld_send(dev, skb);
}
@ -380,10 +380,10 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
struct cpl_close_con_req *req;
struct sk_buff *skb;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
skb = get_skb(NULL, sizeof(*req), gfp);
if (!skb) {
printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
return -ENOMEM;
}
skb->priority = CPL_PRIORITY_DATA;
@ -400,11 +400,11 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
struct cpl_abort_req *req;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
skb = get_skb(skb, sizeof(*req), gfp);
if (!skb) {
printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
__FUNCTION__);
__func__);
return -ENOMEM;
}
skb->priority = CPL_PRIORITY_DATA;
@ -426,12 +426,12 @@ static int send_connect(struct iwch_ep *ep)
unsigned int mtu_idx;
int wscale;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);

skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
if (!skb) {
printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
__FUNCTION__);
__func__);
return -ENOMEM;
}
mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
@ -470,7 +470,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
struct mpa_message *mpa;
int len;

PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);
PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

BUG_ON(skb_cloned(skb));

@ -530,13 +530,13 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
struct mpa_message *mpa;
struct sk_buff *skb;

PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
PDBG("%s ep %p plen %d\n", __func__, ep, plen);

mpalen = sizeof(*mpa) + plen;

skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
if (!skb) {
printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
return -ENOMEM;
}
skb_reserve(skb, sizeof(*req));
@ -580,13 +580,13 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
int len;
struct sk_buff *skb;

PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);
PDBG("%s ep %p plen %d\n", __func__, ep, plen);

mpalen = sizeof(*mpa) + plen;

skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
if (!skb) {
printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
return -ENOMEM;
}
skb->priority = CPL_PRIORITY_DATA;
@ -630,7 +630,7 @@ static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct cpl_act_establish *req = cplhdr(skb);
unsigned int tid = GET_TID(req);

PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);
PDBG("%s ep %p tid %d\n", __func__, ep, tid);

dst_confirm(ep->dst);

@ -663,7 +663,7 @@ static void close_complete_upcall(struct iwch_ep *ep)
{
struct iw_cm_event event;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
if (ep->com.cm_id) {
@ -680,7 +680,7 @@ static void peer_close_upcall(struct iwch_ep *ep)
{
struct iw_cm_event event;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_DISCONNECT;
if (ep->com.cm_id) {
@ -694,7 +694,7 @@ static void peer_abort_upcall(struct iwch_ep *ep)
{
struct iw_cm_event event;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CLOSE;
event.status = -ECONNRESET;
@ -712,7 +712,7 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
struct iw_cm_event event;

PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
PDBG("%s ep %p status %d\n", __func__, ep, status);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
@ -724,7 +724,7 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
}
if (ep->com.cm_id) {
PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
PDBG("%s ep %p tid %d status %d\n", __func__, ep,
ep->hwtid, status);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
}
@ -739,7 +739,7 @@ static void connect_request_upcall(struct iwch_ep *ep)
{
struct iw_cm_event event;

PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
event.local_addr = ep->com.local_addr;
@ -759,11 +759,11 @@ static void established_upcall(struct iwch_ep *ep)
{
struct iw_cm_event event;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_ESTABLISHED;
if (ep->com.cm_id) {
PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
}
}
@ -773,7 +773,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits)
struct cpl_rx_data_ack *req;
struct sk_buff *skb;

PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
PDBG("%s ep %p credits %u\n", __func__, ep, credits);
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
if (!skb) {
printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
@ -797,7 +797,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
enum iwch_qp_attr_mask mask;
int err;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);

/*
* Stop mpa timer. If it expired, then the state has
@ -884,7 +884,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
ep->mpa_attr.version = mpa_rev;
PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
"xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
"xmit_marker_enabled=%d, version=%d\n", __func__,
ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

@ -915,7 +915,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
struct mpa_message *mpa;
u16 plen;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);

/*
* Stop mpa timer. If it expired, then the state has
@ -935,7 +935,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
return;
}

PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

/*
* Copy the new data into our accumulation buffer.
@ -950,7 +950,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
*/
if (ep->mpa_pkt_len < sizeof(*mpa))
return;
PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
mpa = (struct mpa_message *) ep->mpa_pkt;

/*
@ -1000,7 +1000,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
ep->mpa_attr.version = mpa_rev;
PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
"xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
"xmit_marker_enabled=%d, version=%d\n", __func__,
ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

@ -1017,7 +1017,7 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct cpl_rx_data *hdr = cplhdr(skb);
unsigned int dlen = ntohs(hdr->len);

PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);
PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

skb_pull(skb, sizeof(*hdr));
skb_trim(skb, dlen);
@ -1037,7 +1037,7 @@ static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
default:
printk(KERN_ERR MOD "%s Unexpected streaming data."
" ep %p state %d tid %d\n",
__FUNCTION__, ep, state_read(&ep->com), ep->hwtid);
__func__, ep, state_read(&ep->com), ep->hwtid);

/*
* The ep will timeout and inform the ULP of the failure.
@ -1063,7 +1063,7 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct cpl_wr_ack *hdr = cplhdr(skb);
unsigned int credits = ntohs(hdr->credits);

PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
PDBG("%s ep %p credits %u\n", __func__, ep, credits);

if (credits == 0)
return CPL_RET_BUF_DONE;
@ -1084,7 +1084,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
struct iwch_ep *ep = ctx;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);

/*
* We get 2 abort replies from the HW. The first one must
@ -1115,7 +1115,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *ep = ctx;
struct cpl_act_open_rpl *rpl = cplhdr(skb);

PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
status2errno(rpl->status));
connect_reply_upcall(ep, status2errno(rpl->status));
state_set(&ep->com, DEAD);
@ -1133,7 +1133,7 @@ static int listen_start(struct iwch_listen_ep *ep)
struct sk_buff *skb;
struct cpl_pass_open_req *req;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
if (!skb) {
printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
@ -1162,7 +1162,7 @@ static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_listen_ep *ep = ctx;
struct cpl_pass_open_rpl *rpl = cplhdr(skb);

PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
PDBG("%s ep %p status %d error %d\n", __func__, ep,
rpl->status, status2errno(rpl->status));
ep->com.rpl_err = status2errno(rpl->status);
ep->com.rpl_done = 1;
@ -1176,10 +1176,10 @@ static int listen_stop(struct iwch_listen_ep *ep)
struct sk_buff *skb;
struct cpl_close_listserv_req *req;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
if (!skb) {
printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
return -ENOMEM;
}
req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
@ -1197,7 +1197,7 @@ static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
struct iwch_listen_ep *ep = ctx;
struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
ep->com.rpl_err = status2errno(rpl->status);
ep->com.rpl_done = 1;
wake_up(&ep->com.waitq);
@ -1211,7 +1211,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
u32 opt0h, opt0l, opt2;
int wscale;

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
BUG_ON(skb_cloned(skb));
skb_trim(skb, sizeof(*rpl));
skb_get(skb);
@ -1244,7 +1244,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
struct sk_buff *skb)
{
PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
peer_ip);
BUG_ON(skb_cloned(skb));
skb_trim(skb, sizeof(struct cpl_tid_release));
@ -1279,11 +1279,11 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct rtable *rt;
struct iff_mac tim;

PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);
PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

if (state_read(&parent_ep->com) != LISTEN) {
printk(KERN_ERR "%s - listening ep not in LISTEN\n",
__FUNCTION__);
__func__);
goto reject;
}

@ -1295,7 +1295,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
printk(KERN_ERR
"%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
__FUNCTION__,
__func__,
req->dst_mac[0],
req->dst_mac[1],
req->dst_mac[2],
@ -1313,21 +1313,21 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
if (!rt) {
printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
__FUNCTION__);
__func__);
goto reject;
}
dst = &rt->u.dst;
l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__FUNCTION__);
__func__);
dst_release(dst);
goto reject;
}
child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
if (!child_ep) {
printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__FUNCTION__);
__func__);
l2t_release(L2DATA(tdev), l2t);
dst_release(dst);
goto reject;
@ -1362,7 +1362,7 @@ static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *ep = ctx;
struct cpl_pass_establish *req = cplhdr(skb);

PDBG("%s ep %p\n", __FUNCTION__, ep);
PDBG("%s ep %p\n", __func__, ep);
ep->snd_seq = ntohl(req->snd_isn);
ep->rcv_seq = ntohl(req->rcv_isn);

int disconnect = 1;
|
||||
int release = 0;
|
||||
|
||||
PDBG("%s ep %p\n", __FUNCTION__, ep);
|
||||
PDBG("%s ep %p\n", __func__, ep);
|
||||
dst_confirm(ep->dst);
|
||||
|
||||
spin_lock_irqsave(&ep->com.lock, flags);
|
||||
@ -1473,7 +1473,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
int state;
|
||||
|
||||
if (is_neg_adv_abort(req->status)) {
|
||||
PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
|
||||
PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
|
||||
ep->hwtid);
|
||||
t3_l2t_send_event(ep->com.tdev, ep->l2t);
|
||||
return CPL_RET_BUF_DONE;
|
||||
@ -1489,7 +1489,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
}
|
||||
|
||||
state = state_read(&ep->com);
|
||||
PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
|
||||
PDBG("%s ep %p state %u\n", __func__, ep, state);
|
||||
switch (state) {
|
||||
case CONNECTING:
|
||||
break;
|
||||
@ -1528,14 +1528,14 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
if (ret)
|
||||
printk(KERN_ERR MOD
|
||||
"%s - qp <- error failed!\n",
|
||||
__FUNCTION__);
|
||||
__func__);
|
||||
}
|
||||
peer_abort_upcall(ep);
|
||||
break;
|
||||
case ABORTING:
|
||||
break;
|
||||
case DEAD:
|
||||
PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
|
||||
PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
|
||||
return CPL_RET_BUF_DONE;
|
||||
default:
|
||||
BUG_ON(1);
|
||||
@ -1546,7 +1546,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
|
||||
if (!rpl_skb) {
|
||||
printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
|
||||
__FUNCTION__);
|
||||
__func__);
|
||||
dst_release(ep->dst);
|
||||
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
|
||||
put_ep(&ep->com);
|
||||
@ -1573,7 +1573,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
unsigned long flags;
|
||||
int release = 0;
|
||||
|
||||
PDBG("%s ep %p\n", __FUNCTION__, ep);
|
||||
PDBG("%s ep %p\n", __func__, ep);
|
||||
BUG_ON(!ep);
|
||||
|
||||
/* The cm_id may be null if we failed to connect */
|
||||
@ -1624,9 +1624,9 @@ static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
{
|
||||
struct iwch_ep *ep = ctx;
|
||||
|
||||
PDBG("%s ep %p\n", __FUNCTION__, ep);
|
||||
PDBG("%s ep %p\n", __func__, ep);
|
||||
skb_pull(skb, sizeof(struct cpl_rdma_terminate));
|
||||
PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len);
|
||||
PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
|
||||
skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
|
||||
skb->len);
|
||||
ep->com.qp->attr.terminate_msg_len = skb->len;
|
||||
@ -1639,13 +1639,13 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
struct cpl_rdma_ec_status *rep = cplhdr(skb);
|
||||
struct iwch_ep *ep = ctx;
|
||||
|
||||
PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid,
|
||||
PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
|
||||
rep->status);
|
||||
if (rep->status) {
|
||||
struct iwch_qp_attributes attrs;
|
||||
|
||||
printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
|
||||
__FUNCTION__, ep->hwtid);
|
||||
__func__, ep->hwtid);
|
||||
stop_ep_timer(ep);
|
||||
attrs.next_state = IWCH_QP_STATE_ERROR;
|
||||
iwch_modify_qp(ep->com.qp->rhp,
|
||||
@ -1663,7 +1663,7 @@ static void ep_timeout(unsigned long arg)
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&ep->com.lock, flags);
|
||||
PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid,
|
||||
PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
|
||||
ep->com.state);
|
||||
switch (ep->com.state) {
|
||||
case MPA_REQ_SENT:
|
||||
@ -1693,7 +1693,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
|
||||
{
|
||||
int err;
|
||||
struct iwch_ep *ep = to_ep(cm_id);
|
||||
PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
|
||||
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
|
||||
|
||||
if (state_read(&ep->com) == DEAD) {
|
||||
put_ep(&ep->com);
|
||||
@ -1718,7 +1718,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
||||
struct iwch_dev *h = to_iwch_dev(cm_id->device);
|
||||
struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
|
||||
|
||||
PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid);
|
||||
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
|
||||
if (state_read(&ep->com) == DEAD)
|
||||
return -ECONNRESET;
|
||||
|
||||
@ -1739,7 +1739,7 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
||||
ep->com.rpl_err = 0;
|
||||
ep->ird = conn_param->ird;
|
||||
ep->ord = conn_param->ord;
|
||||
PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord);
|
||||
PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
|
||||
|
||||
get_ep(&ep->com);
|
||||
|
||||
@ -1810,7 +1810,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
||||
|
||||
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
|
||||
if (!ep) {
|
||||
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
|
||||
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
@ -1827,7 +1827,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
||||
ep->com.cm_id = cm_id;
|
||||
ep->com.qp = get_qhp(h, conn_param->qpn);
|
||||
BUG_ON(!ep->com.qp);
|
||||
PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn,
|
||||
PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
|
||||
ep->com.qp, cm_id);
|
||||
|
||||
/*
|
||||
@ -1835,7 +1835,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
||||
*/
|
||||
ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
|
||||
if (ep->atid == -1) {
|
||||
printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
|
||||
printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
|
||||
err = -ENOMEM;
|
||||
goto fail2;
|
||||
}
|
||||
@ -1847,7 +1847,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
||||
cm_id->local_addr.sin_port,
|
||||
cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
|
||||
if (!rt) {
|
||||
printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__);
|
||||
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
|
||||
err = -EHOSTUNREACH;
|
||||
goto fail3;
|
||||
}
|
||||
@ -1857,7 +1857,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
||||
ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
|
||||
ep->dst->neighbour->dev);
|
||||
if (!ep->l2t) {
|
||||
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__);
|
||||
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
|
||||
err = -ENOMEM;
|
||||
goto fail4;
|
||||
}
|
||||
@ -1894,11 +1894,11 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
|
||||
|
||||
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
|
||||
if (!ep) {
|
||||
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__);
|
||||
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
|
||||
err = -ENOMEM;
|
||||
goto fail1;
|
||||
}
|
||||
PDBG("%s ep %p\n", __FUNCTION__, ep);
|
||||
PDBG("%s ep %p\n", __func__, ep);
|
||||
ep->com.tdev = h->rdev.t3cdev_p;
|
||||
cm_id->add_ref(cm_id);
|
||||
ep->com.cm_id = cm_id;
|
||||
@ -1910,7 +1910,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
|
||||
*/
|
||||
ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
|
||||
if (ep->stid == -1) {
|
||||
printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__);
|
||||
printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
|
||||
err = -ENOMEM;
|
||||
goto fail2;
|
||||
}
|
||||
@ -1942,7 +1942,7 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id)
|
||||
int err;
|
||||
struct iwch_listen_ep *ep = to_listen_ep(cm_id);
|
||||
|
||||
PDBG("%s ep %p\n", __FUNCTION__, ep);
|
||||
PDBG("%s ep %p\n", __func__, ep);
|
||||
|
||||
might_sleep();
|
||||
state_set(&ep->com, DEAD);
|
||||
@ -1965,11 +1965,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
|
||||
|
||||
spin_lock_irqsave(&ep->com.lock, flags);
|
||||
|
||||
PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep,
|
||||
PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
|
||||
states[ep->com.state], abrupt);
|
||||
|
||||
if (ep->com.state == DEAD) {
|
||||
PDBG("%s already dead ep %p\n", __FUNCTION__, ep);
|
||||
PDBG("%s already dead ep %p\n", __func__, ep);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -2020,7 +2020,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
|
||||
if (ep->dst != old)
|
||||
return 0;
|
||||
|
||||
PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new,
|
||||
PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
|
||||
l2t);
|
||||
dst_hold(new);
|
||||
l2t_release(L2DATA(ep->com.tdev), ep->l2t);
|
||||
|
@ -54,13 +54,13 @@
#define MPA_FLAGS_MASK 0xE0

#define put_ep(ep) { \
PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __FUNCTION__, __LINE__, \
PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \
ep, atomic_read(&((ep)->kref.refcount))); \
kref_put(&((ep)->kref), __free_ep); \
}

#define get_ep(ep) { \
PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __FUNCTION__, __LINE__, \
PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
ep, atomic_read(&((ep)->kref.refcount))); \
kref_get(&((ep)->kref)); \
}

@ -67,7 +67,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
&credit);
if (t3a_device(chp->rhp) && credit) {
PDBG("%s updating %d cq credits on id %d\n", __FUNCTION__,
PDBG("%s updating %d cq credits on id %d\n", __func__,
credit, chp->cq.cqid);
cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
}
@ -83,7 +83,7 @@ static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
wc->vendor_err = CQE_STATUS(cqe);

PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
"lo 0x%x cookie 0x%llx\n", __FUNCTION__,
"lo 0x%x cookie 0x%llx\n", __func__,
CQE_QPID(cqe), CQE_TYPE(cqe),
CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
CQE_WRID_LOW(cqe), (unsigned long long) cookie);

@ -52,7 +52,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,

if (!qhp) {
printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
__FUNCTION__, CQE_STATUS(rsp_msg->cqe),
__func__, CQE_STATUS(rsp_msg->cqe),
CQE_QPID(rsp_msg->cqe));
spin_unlock(&rnicp->lock);
return;
@ -61,14 +61,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
(qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
PDBG("%s AE received after RTS - "
"qp state %d qpid 0x%x status 0x%x\n", __FUNCTION__,
"qp state %d qpid 0x%x status 0x%x\n", __func__,
qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
spin_unlock(&rnicp->lock);
return;
}

printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
"type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
"type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
@ -132,10 +132,10 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
(CQE_STATUS(rsp_msg->cqe) == 0)) {
if (SQ_TYPE(rsp_msg->cqe)) {
PDBG("%s QPID 0x%x ep %p disconnecting\n",
__FUNCTION__, qhp->wq.qpid, qhp->ep);
__func__, qhp->wq.qpid, qhp->ep);
iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
} else {
PDBG("%s post REQ_ERR AE QPID 0x%x\n", __FUNCTION__,
PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
qhp->wq.qpid);
post_qp_event(rnicp, chp, rsp_msg,
IB_EVENT_QP_REQ_ERR, 0);
@ -180,7 +180,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
case TPT_ERR_INVALIDATE_SHARED_MR:
case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
printk(KERN_ERR "%s - CQE Err qpid 0x%x opcode %d status 0x%x "
"type %d wrid.hi 0x%x wrid.lo 0x%x \n", __FUNCTION__,
"type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));

@ -62,7 +62,7 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
insert_handle(rhp, &rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
return 0;
}

@ -96,7 +96,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
mmid = stag >> 8;
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
insert_handle(rhp, &rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p\n", __FUNCTION__, mmid, mhp);
PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
return 0;
}

@ -163,7 +163,7 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
((u64) j << *shift));

PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
__FUNCTION__, (unsigned long long) *iova_start,
__func__, (unsigned long long) *iova_start,
(unsigned long long) mask, *shift, (unsigned long long) *total_size,
*npages);

@ -101,7 +101,7 @@ static int iwch_dealloc_ucontext(struct ib_ucontext *context)
struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
struct iwch_mm_entry *mm, *tmp;

PDBG("%s context %p\n", __FUNCTION__, context);
PDBG("%s context %p\n", __func__, context);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
@ -115,7 +115,7 @@ static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
struct iwch_ucontext *context;
struct iwch_dev *rhp = to_iwch_dev(ibdev);

PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
PDBG("%s ibdev %p\n", __func__, ibdev);
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return ERR_PTR(-ENOMEM);
@ -129,7 +129,7 @@ static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
struct iwch_cq *chp;

PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
PDBG("%s ib_cq %p\n", __func__, ib_cq);
chp = to_iwch_cq(ib_cq);

remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@ -151,7 +151,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
struct iwch_create_cq_req ureq;
struct iwch_ucontext *ucontext = NULL;

PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
rhp = to_iwch_dev(ibdev);
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
if (!chp)
@ -233,7 +233,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
struct t3_cq oldcq, newcq;
int ret;

PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);
PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

/* We don't downsize... */
if (cqe <= cq->cqe)
@ -281,7 +281,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
if (ret) {
printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
__FUNCTION__, ret);
__func__, ret);
}

/* add user hooks here */
@ -316,7 +316,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
chp->cq.rptr = rptr;
} else
spin_lock_irqsave(&chp->lock, flag);
PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
spin_unlock_irqrestore(&chp->lock, flag);
if (err < 0)
@ -337,7 +337,7 @@ static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct iwch_ucontext *ucontext;
u64 addr;

PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
key, len);

if (vma->vm_start & (PAGE_SIZE-1)) {
@ -390,7 +390,7 @@ static int iwch_deallocate_pd(struct ib_pd *pd)

php = to_iwch_pd(pd);
rhp = php->rhp;
PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
kfree(php);
return 0;
@ -404,7 +404,7 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
u32 pdid;
struct iwch_dev *rhp;

PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
PDBG("%s ibdev %p\n", __func__, ibdev);
rhp = (struct iwch_dev *) ibdev;
pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
if (!pdid)
@ -422,7 +422,7 @@ static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
return ERR_PTR(-EFAULT);
}
}
PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
return &php->ibpd;
}

@ -432,7 +432,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
struct iwch_mr *mhp;
u32 mmid;

PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
PDBG("%s ib_mr %p\n", __func__, ib_mr);
/* There can be no memory windows */
if (atomic_read(&ib_mr->usecnt))
return -EINVAL;
@ -447,7 +447,7 @@ static int iwch_dereg_mr(struct ib_mr *ib_mr)
kfree((void *) (unsigned long) mhp->kva);
if (mhp->umem)
ib_umem_release(mhp->umem);
PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
kfree(mhp);
return 0;
}
@ -467,7 +467,7 @@ static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
struct iwch_mr *mhp;
int ret;

PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
PDBG("%s ib_pd %p\n", __func__, pd);
php = to_iwch_pd(pd);
rhp = php->rhp;

@ -531,7 +531,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr,
int npages;
int ret;

PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);
PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

/* There can be no memory windows */
if (atomic_read(&mr->usecnt))
@ -594,7 +594,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct iwch_mr *mhp;
struct iwch_reg_user_mr_resp uresp;

PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
PDBG("%s ib_pd %p\n", __func__, pd);

php = to_iwch_pd(pd);
rhp = php->rhp;
@ -649,7 +649,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (udata && !t3a_device(rhp)) {
uresp.pbl_addr = (mhp->attr.pbl_addr -
rhp->rdev.rnic_info.pbl_base) >> 3;
PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
PDBG("%s user resp pbl_addr 0x%x\n", __func__,
uresp.pbl_addr);

if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
@ -673,7 +673,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
u64 kva;
struct ib_mr *ibmr;

PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
PDBG("%s ib_pd %p\n", __func__, pd);

/*
* T3 only supports 32 bits of size.
@ -710,7 +710,7 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
mhp->attr.stag = stag;
mmid = (stag) >> 8;
insert_handle(rhp, &rhp->mmidr, mhp, mmid);
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
return &(mhp->ibmw);
}

@ -726,7 +726,7 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
remove_handle(rhp, &rhp->mmidr, mmid);
kfree(mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
return 0;
}

@ -754,7 +754,7 @@ static int iwch_destroy_qp(struct ib_qp *ib_qp)
cxio_destroy_qp(&rhp->rdev, &qhp->wq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
ib_qp, qhp->wq.qpid, qhp);
kfree(qhp);
return 0;
@ -773,7 +773,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
int wqsize, sqsize, rqsize;
struct iwch_ucontext *ucontext;

PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
PDBG("%s ib_pd %p\n", __func__, pd);
if (attrs->qp_type != IB_QPT_RC)
return ERR_PTR(-EINVAL);
php = to_iwch_pd(pd);
@ -805,7 +805,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
*/
sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
wqsize = roundup_pow_of_two(rqsize + sqsize);
PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__,
PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
wqsize, sqsize, rqsize);
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
if (!qhp)
@ -898,7 +898,7 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
init_timer(&(qhp->timer));
PDBG("%s sq_num_entries %d, rq_num_entries %d "
"qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
__FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
__func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
1 << qhp->wq.size_log2);
return &qhp->ibqp;
@ -912,7 +912,7 @@ static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
enum iwch_qp_attr_mask mask = 0;
struct iwch_qp_attributes attrs;

PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp);
PDBG("%s ib_qp %p\n", __func__, ibqp);

/* iwarp does not support the RTR state */
if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@ -945,20 +945,20 @@ static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,

void iwch_qp_add_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
PDBG("%s ib_qp %p\n", __func__, qp);
atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
PDBG("%s ib_qp %p\n", __func__, qp);
if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

@ -966,7 +966,7 @@ static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
static int iwch_query_pkey(struct ib_device *ibdev,
u8 port, u16 index, u16 * pkey)
{
PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
PDBG("%s ibdev %p\n", __func__, ibdev);
*pkey = 0;
return 0;
}
@ -977,7 +977,7 @@ static int iwch_query_gid(struct ib_device *ibdev, u8 port,
struct iwch_dev *dev;

PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
__FUNCTION__, ibdev, port, index, gid);
__func__, ibdev, port, index, gid);
dev = to_iwch_dev(ibdev);
BUG_ON(port == 0 || port > 2);
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@ -990,7 +990,7 @@ static int iwch_query_device(struct ib_device *ibdev,
{

struct iwch_dev *dev;
PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
PDBG("%s ibdev %p\n", __func__, ibdev);

dev = to_iwch_dev(ibdev);
memset(props, 0, sizeof *props);
@ -1017,7 +1017,7 @@ static int iwch_query_device(struct ib_device *ibdev,
static int iwch_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
PDBG("%s ibdev %p\n", __func__, ibdev);
props->max_mtu = IB_MTU_4096;
props->lid = 0;
props->lmc = 0;
@ -1045,7 +1045,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
{
struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
ibdev.class_dev);
PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
PDBG("%s class dev 0x%p\n", __func__, cdev);
return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
}

@ -1056,7 +1056,7 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
struct ethtool_drvinfo info;
struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
PDBG("%s class dev 0x%p\n", __func__, cdev);
rtnl_lock();
lldev->ethtool_ops->get_drvinfo(lldev, &info);
rtnl_unlock();
@ -1070,7 +1070,7 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
struct ethtool_drvinfo info;
struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
PDBG("%s class dev 0x%p\n", __func__, cdev);
rtnl_lock();
lldev->ethtool_ops->get_drvinfo(lldev, &info);
rtnl_unlock();
@ -1081,7 +1081,7 @@ static ssize_t show_board(struct class_device *cdev, char *buf)
{
struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
ibdev.class_dev);
PDBG("%s class dev 0x%p\n", __FUNCTION__, dev);
PDBG("%s class dev 0x%p\n", __func__, dev);
return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
dev->rdev.rnic_info.pdev->device);
}
@ -1103,14 +1103,13 @@ int iwch_register_device(struct iwch_dev *dev)
int ret;
int i;

PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
PDBG("%s iwch_dev %p\n", __func__, dev);
strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
dev->device_cap_flags =
(IB_DEVICE_ZERO_STAG |
IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);
(IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW);

dev->ibdev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@ -1207,7 +1206,7 @@ void iwch_unregister_device(struct iwch_dev *dev)
{
int i;

PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
PDBG("%s iwch_dev %p\n", __func__, dev);
for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
class_device_remove_file(&dev->ibdev.class_dev,
iwch_class_attributes[i]);

@ -213,7 +213,7 @@ static inline struct iwch_mm_entry *remove_mmap(struct iwch_ucontext *ucontext,
if (mm->key == key && mm->len == len) {
list_del_init(&mm->entry);
spin_unlock(&ucontext->mmap_lock);
PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
key, (unsigned long long) mm->addr, mm->len);
return mm;
}
@ -226,7 +226,7 @@ static inline void insert_mmap(struct iwch_ucontext *ucontext,
struct iwch_mm_entry *mm)
{
spin_lock(&ucontext->mmap_lock);
PDBG("%s key 0x%x addr 0x%llx len %d\n", __FUNCTION__,
PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
mm->key, (unsigned long long) mm->addr, mm->len);
list_add_tail(&mm->entry, &ucontext->mmaps);
spin_unlock(&ucontext->mmap_lock);

@ -72,7 +72,7 @@ static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
wqe->send.reserved[2] = 0;
if (wr->opcode == IB_WR_SEND_WITH_IMM) {
plen = 4;
wqe->send.sgl[0].stag = wr->imm_data;
wqe->send.sgl[0].stag = wr->ex.imm_data;
wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
wqe->send.num_sgle = __constant_cpu_to_be32(0);
*flit_cnt = 5;
@ -112,7 +112,7 @@ static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,

if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
plen = 4;
wqe->write.sgl[0].stag = wr->imm_data;
wqe->write.sgl[0].stag = wr->ex.imm_data;
wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
wqe->write.num_sgle = __constant_cpu_to_be32(0);
*flit_cnt = 6;
@ -168,30 +168,30 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,

mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
if (!mhp) {
PDBG("%s %d\n", __FUNCTION__, __LINE__);
PDBG("%s %d\n", __func__, __LINE__);
return -EIO;
}
if (!mhp->attr.state) {
PDBG("%s %d\n", __FUNCTION__, __LINE__);
PDBG("%s %d\n", __func__, __LINE__);
return -EIO;
}
if (mhp->attr.zbva) {
PDBG("%s %d\n", __FUNCTION__, __LINE__);
PDBG("%s %d\n", __func__, __LINE__);
return -EIO;
}

if (sg_list[i].addr < mhp->attr.va_fbo) {
PDBG("%s %d\n", __FUNCTION__, __LINE__);
PDBG("%s %d\n", __func__, __LINE__);
return -EINVAL;
}
if (sg_list[i].addr + ((u64) sg_list[i].length) <
sg_list[i].addr) {
PDBG("%s %d\n", __FUNCTION__, __LINE__);
PDBG("%s %d\n", __func__, __LINE__);
return -EINVAL;
}
if (sg_list[i].addr + ((u64) sg_list[i].length) >
mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
PDBG("%s %d\n", __FUNCTION__, __LINE__);
PDBG("%s %d\n", __func__, __LINE__);
return -EINVAL;
}
offset = sg_list[i].addr - mhp->attr.va_fbo;
@ -290,7 +290,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qhp->wq.oldest_read = sqp;
break;
default:
PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
PDBG("%s post of type=%d TBD!\n", __func__,
wr->opcode);
err = -EINVAL;
}
@ -309,7 +309,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
0, t3_wr_flit_cnt);
PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
__FUNCTION__, (unsigned long long) wr->wr_id, idx,
__func__, (unsigned long long) wr->wr_id, idx,
Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
sqp->opcode);
wr = wr->next;
@ -361,7 +361,7 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
0, sizeof(struct t3_receive_wr) >> 3);
PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
"wqe %p \n", __FUNCTION__, (unsigned long long) wr->wr_id,
"wqe %p \n", __func__, (unsigned long long) wr->wr_id,
idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
++(qhp->wq.rq_wptr);
++(qhp->wq.wptr);
@ -407,7 +407,7 @@ int iwch_bind_mw(struct ib_qp *qp,
return -ENOMEM;
}
idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx,
PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
mw, mw_bind);
wqe = (union t3_wr *) (qhp->wq.queue + idx);

@ -595,10 +595,10 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
struct terminate_message *term;
struct sk_buff *skb;

PDBG("%s %d\n", __FUNCTION__, __LINE__);
PDBG("%s %d\n", __func__, __LINE__);
skb = alloc_skb(40, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__);
printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
return -ENOMEM;
}
wqe = (union t3_wr *)skb_put(skb, 40);
@ -629,7 +629,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
rchp = get_chp(qhp->rhp, qhp->attr.rcq);
schp = get_chp(qhp->rhp, qhp->attr.scq);

PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
/* take a ref on the qhp since we must release the lock */
atomic_inc(&qhp->refcnt);
spin_unlock_irqrestore(&qhp->lock, *flag);
@ -720,11 +720,11 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
init_attr.irs = qhp->ep->rcv_seq;
PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
"flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
"flags 0x%x qpcaps 0x%x\n", __func__,
init_attr.rq_addr, init_attr.rq_size,
init_attr.flags, init_attr.qpcaps);
ret = cxio_rdma_init(&rhp->rdev, &init_attr);
PDBG("%s ret %d\n", __FUNCTION__, ret);
PDBG("%s ret %d\n", __func__, ret);
return ret;
}

@ -742,7 +742,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
int free = 0;
struct iwch_ep *ep = NULL;

PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
(mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

@ -899,14 +899,14 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
break;
default:
printk(KERN_ERR "%s in a bad state %d\n",
__FUNCTION__, qhp->attr.state);
__func__, qhp->attr.state);
ret = -EINVAL;
goto err;
break;
}
goto out;
err:
PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
qhp->wq.qpid);

/* disassociate the LLP connection */
@ -939,7 +939,7 @@ out:
if (free)
put_ep(&ep->com);

PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
PDBG("%s exit state %d\n", __func__, qhp->attr.state);
return ret;
}

@ -41,9 +41,6 @@
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
|
||||
#include <asm/current.h>
|
||||
|
||||
#include "ehca_tools.h"
|
||||
#include "ehca_iverbs.h"
|
||||
#include "hcp_if.h"
|
||||
@ -170,17 +167,8 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
|
||||
{
|
||||
struct ehca_av *av;
|
||||
struct ehca_ud_av new_ehca_av;
|
||||
struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
|
||||
struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
|
||||
ib_device);
|
||||
u32 cur_pid = current->tgid;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
my_pd->ownpid != cur_pid) {
|
||||
ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memset(&new_ehca_av, 0, sizeof(new_ehca_av));
|
||||
new_ehca_av.sl = ah_attr->sl;
|
||||
@ -242,15 +230,6 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
|
||||
int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
|
||||
{
|
||||
struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
|
||||
struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
|
||||
u32 cur_pid = current->tgid;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
my_pd->ownpid != cur_pid) {
|
||||
ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
|
||||
sizeof(ah_attr->grh.dgid));
|
||||
@ -273,16 +252,6 @@ int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
|
||||
|
||||
int ehca_destroy_ah(struct ib_ah *ah)
|
||||
{
|
||||
struct ehca_pd *my_pd = container_of(ah->pd, struct ehca_pd, ib_pd);
|
||||
u32 cur_pid = current->tgid;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
my_pd->ownpid != cur_pid) {
|
||||
ehca_err(ah->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
|
||||
|
||||
return 0;
|
||||
|
@ -132,7 +132,6 @@ struct ehca_shca {
|
||||
struct ehca_pd {
|
||||
struct ib_pd ib_pd;
|
||||
struct ipz_pd fw_pd;
|
||||
u32 ownpid;
|
||||
/* small queue mgmt */
|
||||
struct mutex lock;
|
||||
struct list_head free[2];
|
||||
@ -215,7 +214,6 @@ struct ehca_cq {
|
||||
atomic_t nr_events; /* #events seen */
|
||||
wait_queue_head_t wait_completion;
|
||||
spinlock_t task_lock;
|
||||
u32 ownpid;
|
||||
/* mmap counter for resources mapped into user space */
|
||||
u32 mm_count_queue;
|
||||
u32 mm_count_galpa;
|
||||
|
@ -43,8 +43,6 @@
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <asm/current.h>
|
||||
|
||||
#include "ehca_iverbs.h"
|
||||
#include "ehca_classes.h"
|
||||
#include "ehca_irq.h"
|
||||
@ -148,7 +146,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
|
||||
spin_lock_init(&my_cq->task_lock);
|
||||
atomic_set(&my_cq->nr_events, 0);
|
||||
init_waitqueue_head(&my_cq->wait_completion);
|
||||
my_cq->ownpid = current->tgid;
|
||||
|
||||
cq = &my_cq->ib_cq;
|
||||
|
||||
@ -320,7 +317,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
|
||||
struct ehca_shca *shca = container_of(device, struct ehca_shca,
|
||||
ib_device);
|
||||
struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
|
||||
u32 cur_pid = current->tgid;
|
||||
unsigned long flags;
|
||||
|
||||
if (cq->uobject) {
|
||||
@ -329,12 +325,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
|
||||
"user space cq_num=%x", my_cq->cq_number);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (my_cq->ownpid != cur_pid) {
|
||||
ehca_err(device, "Invalid caller pid=%x ownpid=%x "
|
||||
"cq_num=%x",
|
||||
cur_pid, my_cq->ownpid, my_cq->cq_number);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -374,15 +364,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
|
||||
|
||||
int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
|
||||
{
|
||||
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
|
||||
u32 cur_pid = current->tgid;
|
||||
|
||||
if (cq->uobject && my_cq->ownpid != cur_pid) {
|
||||
ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_cq->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* TODO: proper resize needs to be done */
|
||||
ehca_err(cq->device, "not implemented yet");
|
||||
|
||||
|
@ -43,6 +43,11 @@
|
||||
#include "ehca_iverbs.h"
|
||||
#include "hcp_if.h"
|
||||
|
||||
static unsigned int limit_uint(unsigned int value)
|
||||
{
|
||||
return min_t(unsigned int, value, INT_MAX);
|
||||
}
|
||||
|
||||
int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
|
||||
{
|
||||
int i, ret = 0;
|
||||
@ -83,37 +88,40 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
|
||||
props->vendor_id = rblock->vendor_id >> 8;
|
||||
props->vendor_part_id = rblock->vendor_part_id >> 16;
|
||||
props->hw_ver = rblock->hw_ver;
|
||||
props->max_qp = min_t(unsigned, rblock->max_qp, INT_MAX);
|
||||
props->max_qp_wr = min_t(unsigned, rblock->max_wqes_wq, INT_MAX);
|
||||
props->max_sge = min_t(unsigned, rblock->max_sge, INT_MAX);
|
||||
props->max_sge_rd = min_t(unsigned, rblock->max_sge_rd, INT_MAX);
|
||||
props->max_cq = min_t(unsigned, rblock->max_cq, INT_MAX);
|
||||
props->max_cqe = min_t(unsigned, rblock->max_cqe, INT_MAX);
|
||||
props->max_mr = min_t(unsigned, rblock->max_mr, INT_MAX);
|
||||
props->max_mw = min_t(unsigned, rblock->max_mw, INT_MAX);
|
||||
props->max_pd = min_t(unsigned, rblock->max_pd, INT_MAX);
|
||||
props->max_ah = min_t(unsigned, rblock->max_ah, INT_MAX);
|
||||
props->max_fmr = min_t(unsigned, rblock->max_mr, INT_MAX);
|
||||
props->max_qp = limit_uint(rblock->max_qp);
|
||||
props->max_qp_wr = limit_uint(rblock->max_wqes_wq);
|
||||
props->max_sge = limit_uint(rblock->max_sge);
|
||||
props->max_sge_rd = limit_uint(rblock->max_sge_rd);
|
||||
props->max_cq = limit_uint(rblock->max_cq);
|
||||
props->max_cqe = limit_uint(rblock->max_cqe);
|
||||
props->max_mr = limit_uint(rblock->max_mr);
|
||||
props->max_mw = limit_uint(rblock->max_mw);
|
||||
props->max_pd = limit_uint(rblock->max_pd);
|
||||
props->max_ah = limit_uint(rblock->max_ah);
|
||||
props->max_ee = limit_uint(rblock->max_rd_ee_context);
|
||||
props->max_rdd = limit_uint(rblock->max_rd_domain);
|
||||
props->max_fmr = limit_uint(rblock->max_mr);
|
||||
props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
|
||||
props->max_qp_rd_atom = limit_uint(rblock->max_rr_qp);
|
||||
props->max_ee_rd_atom = limit_uint(rblock->max_rr_ee_context);
|
||||
props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
|
||||
props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
|
||||
props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
|
||||
|
||||
if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
|
||||
props->max_srq = props->max_qp;
|
||||
props->max_srq_wr = props->max_qp_wr;
|
||||
props->max_srq = limit_uint(props->max_qp);
|
||||
props->max_srq_wr = limit_uint(props->max_qp_wr);
|
||||
props->max_srq_sge = 3;
|
||||
}
|
||||
|
||||
props->max_pkeys = 16;
|
||||
props->local_ca_ack_delay
|
||||
= rblock->local_ca_ack_delay;
|
||||
props->max_raw_ipv6_qp
|
||||
= min_t(unsigned, rblock->max_raw_ipv6_qp, INT_MAX);
|
||||
props->max_raw_ethy_qp
|
||||
= min_t(unsigned, rblock->max_raw_ethy_qp, INT_MAX);
|
||||
props->max_mcast_grp
|
||||
= min_t(unsigned, rblock->max_mcast_grp, INT_MAX);
|
||||
props->max_mcast_qp_attach
|
||||
= min_t(unsigned, rblock->max_mcast_qp_attach, INT_MAX);
|
||||
props->max_pkeys = 16;
|
||||
props->local_ca_ack_delay = limit_uint(rblock->local_ca_ack_delay);
|
||||
props->max_raw_ipv6_qp = limit_uint(rblock->max_raw_ipv6_qp);
|
||||
props->max_raw_ethy_qp = limit_uint(rblock->max_raw_ethy_qp);
|
||||
props->max_mcast_grp = limit_uint(rblock->max_mcast_grp);
|
||||
props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
|
||||
props->max_total_mcast_qp_attach
|
||||
= min_t(unsigned, rblock->max_total_mcast_qp_attach, INT_MAX);
|
||||
= limit_uint(rblock->max_total_mcast_qp_attach);
|
||||
|
||||
/* translate device capabilities */
|
||||
props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
|
||||
@ -128,6 +136,46 @@ query_device1:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int map_mtu(struct ehca_shca *shca, u32 fw_mtu)
|
||||
{
|
||||
switch (fw_mtu) {
|
||||
case 0x1:
|
||||
return IB_MTU_256;
|
||||
case 0x2:
|
||||
return IB_MTU_512;
|
||||
case 0x3:
|
||||
return IB_MTU_1024;
|
||||
case 0x4:
|
||||
return IB_MTU_2048;
|
||||
case 0x5:
|
||||
return IB_MTU_4096;
|
||||
default:
|
||||
ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
|
||||
fw_mtu);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
|
||||
{
|
||||
switch (vl_cap) {
|
||||
case 0x1:
|
||||
return 1;
|
||||
case 0x2:
|
||||
return 2;
|
||||
case 0x3:
|
||||
return 4;
|
||||
case 0x4:
|
||||
return 8;
|
||||
case 0x5:
|
||||
return 15;
|
||||
default:
|
||||
ehca_err(&shca->ib_device, "invalid Vl Capability: %x.",
|
||||
vl_cap);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
int ehca_query_port(struct ib_device *ibdev,
|
||||
u8 port, struct ib_port_attr *props)
|
||||
{
|
||||
@ -152,31 +200,13 @@ int ehca_query_port(struct ib_device *ibdev,
|
||||
|
||||
memset(props, 0, sizeof(struct ib_port_attr));
|
||||
|
||||
switch (rblock->max_mtu) {
|
||||
case 0x1:
|
||||
props->active_mtu = props->max_mtu = IB_MTU_256;
|
||||
break;
|
||||
case 0x2:
|
||||
props->active_mtu = props->max_mtu = IB_MTU_512;
|
||||
break;
|
||||
case 0x3:
|
||||
props->active_mtu = props->max_mtu = IB_MTU_1024;
|
||||
break;
|
||||
case 0x4:
|
||||
props->active_mtu = props->max_mtu = IB_MTU_2048;
|
||||
break;
|
||||
case 0x5:
|
||||
props->active_mtu = props->max_mtu = IB_MTU_4096;
|
||||
break;
|
||||
default:
|
||||
ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
|
||||
rblock->max_mtu);
|
||||
break;
|
||||
}
|
||||
|
||||
props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
|
||||
props->port_cap_flags = rblock->capability_mask;
|
||||
props->gid_tbl_len = rblock->gid_tbl_len;
|
||||
props->max_msg_sz = rblock->max_msg_sz;
|
||||
if (rblock->max_msg_sz)
|
||||
props->max_msg_sz = rblock->max_msg_sz;
|
||||
else
|
||||
props->max_msg_sz = 0x1 << 31;
|
||||
props->bad_pkey_cntr = rblock->bad_pkey_cntr;
|
||||
props->qkey_viol_cntr = rblock->qkey_viol_cntr;
|
||||
props->pkey_tbl_len = rblock->pkey_tbl_len;
|
||||
@ -186,6 +216,7 @@ int ehca_query_port(struct ib_device *ibdev,
|
||||
props->sm_sl = rblock->sm_sl;
|
||||
props->subnet_timeout = rblock->subnet_timeout;
|
||||
props->init_type_reply = rblock->init_type_reply;
|
||||
props->max_vl_num = map_number_of_vls(shca, rblock->vl_cap);
|
||||
|
||||
if (rblock->state && rblock->phys_width) {
|
||||
props->phys_state = rblock->phys_pstate;
|
||||
@ -314,7 +345,7 @@ query_gid1:
|
||||
return ret;
|
||||
}
|
||||
|
||||
const u32 allowed_port_caps = (
|
||||
static const u32 allowed_port_caps = (
|
||||
IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
|
||||
IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
|
||||
IB_PORT_VENDOR_CLASS_SUP);
|
||||
|
@ -57,16 +57,17 @@ MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
|
||||
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
|
||||
MODULE_VERSION(HCAD_VERSION);
|
||||
|
||||
int ehca_open_aqp1 = 0;
|
||||
static int ehca_open_aqp1 = 0;
|
||||
static int ehca_hw_level = 0;
|
||||
static int ehca_poll_all_eqs = 1;
|
||||
static int ehca_mr_largepage = 1;
|
||||
|
||||
int ehca_debug_level = 0;
|
||||
int ehca_hw_level = 0;
|
||||
int ehca_nr_ports = 2;
|
||||
int ehca_use_hp_mr = 0;
|
||||
int ehca_port_act_time = 30;
|
||||
int ehca_poll_all_eqs = 1;
|
||||
int ehca_static_rate = -1;
|
||||
int ehca_scaling_code = 0;
|
||||
int ehca_mr_largepage = 1;
|
||||
int ehca_lock_hcalls = -1;
|
||||
|
||||
module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO);
|
||||
@ -396,7 +397,7 @@ init_node_guid1:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ehca_init_device(struct ehca_shca *shca)
|
||||
static int ehca_init_device(struct ehca_shca *shca)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -579,8 +580,8 @@ static ssize_t ehca_store_debug_level(struct device_driver *ddp,
|
||||
return 1;
|
||||
}
|
||||
|
||||
DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
|
||||
ehca_show_debug_level, ehca_store_debug_level);
|
||||
static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
|
||||
ehca_show_debug_level, ehca_store_debug_level);
|
||||
|
||||
static struct attribute *ehca_drv_attrs[] = {
|
||||
&driver_attr_debug_level.attr,
|
||||
@ -941,7 +942,7 @@ void ehca_poll_eqs(unsigned long data)
|
||||
spin_unlock(&shca_list_lock);
|
||||
}
|
||||
|
||||
int __init ehca_module_init(void)
|
||||
static int __init ehca_module_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -988,7 +989,7 @@ module_init1:
|
||||
return ret;
|
||||
};
|
||||
|
||||
void __exit ehca_module_exit(void)
|
||||
static void __exit ehca_module_exit(void)
|
||||
{
|
||||
if (ehca_poll_all_eqs == 1)
|
||||
del_timer_sync(&poll_eqs_timer);
|
||||
|
@ -40,8 +40,6 @@
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <asm/current.h>
|
||||
|
||||
#include <rdma/ib_umem.h>
|
||||
|
||||
#include "ehca_iverbs.h"
|
||||
@ -419,7 +417,6 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
|
||||
struct ehca_shca *shca =
|
||||
container_of(mr->device, struct ehca_shca, ib_device);
|
||||
struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
|
||||
struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
|
||||
u64 new_size;
|
||||
u64 *new_start;
|
||||
u32 new_acl;
|
||||
@ -429,15 +426,6 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
|
||||
u32 num_kpages = 0;
|
||||
u32 num_hwpages = 0;
|
||||
struct ehca_mr_pginfo pginfo;
|
||||
u32 cur_pid = current->tgid;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
(my_pd->ownpid != cur_pid)) {
|
||||
ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
ret = -EINVAL;
|
||||
goto rereg_phys_mr_exit0;
|
||||
}
|
||||
|
||||
if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
|
||||
/* TODO not supported, because PHYP rereg hCall needs pages */
|
||||
@ -577,19 +565,9 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
|
||||
struct ehca_shca *shca =
|
||||
container_of(mr->device, struct ehca_shca, ib_device);
|
||||
struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
|
||||
struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
|
||||
u32 cur_pid = current->tgid;
|
||||
unsigned long sl_flags;
|
||||
struct ehca_mr_hipzout_parms hipzout;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
(my_pd->ownpid != cur_pid)) {
|
||||
ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
ret = -EINVAL;
|
||||
goto query_mr_exit0;
|
||||
}
|
||||
|
||||
if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
|
||||
ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
|
||||
"e_mr->flags=%x", mr, e_mr, e_mr->flags);
|
||||
@ -634,16 +612,6 @@ int ehca_dereg_mr(struct ib_mr *mr)
|
||||
struct ehca_shca *shca =
|
||||
container_of(mr->device, struct ehca_shca, ib_device);
|
||||
struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
|
||||
struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
|
||||
u32 cur_pid = current->tgid;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
(my_pd->ownpid != cur_pid)) {
|
||||
ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
ret = -EINVAL;
|
||||
goto dereg_mr_exit0;
|
||||
}
|
||||
|
||||
if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
|
||||
ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
|
||||
@ -1952,9 +1920,8 @@ next_kpage:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
|
||||
u32 number,
|
||||
u64 *kpage)
|
||||
static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
|
||||
u32 number, u64 *kpage)
|
||||
{
|
||||
int ret = 0;
|
||||
struct ib_phys_buf *pbuf;
|
||||
@ -2012,9 +1979,8 @@ int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
|
||||
u32 number,
|
||||
u64 *kpage)
|
||||
static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
|
||||
u32 number, u64 *kpage)
|
||||
{
|
||||
int ret = 0;
|
||||
u64 *fmrlist;
|
||||
|
@ -38,8 +38,6 @@
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <asm/current.h>
|
||||
|
||||
#include "ehca_tools.h"
|
||||
#include "ehca_iverbs.h"
|
||||
|
||||
@ -58,7 +56,6 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
pd->ownpid = current->tgid;
|
||||
for (i = 0; i < 2; i++) {
|
||||
INIT_LIST_HEAD(&pd->free[i]);
|
||||
INIT_LIST_HEAD(&pd->full[i]);
|
||||
@ -85,18 +82,10 @@ struct ib_pd *ehca_alloc_pd(struct ib_device *device,
|
||||
|
||||
int ehca_dealloc_pd(struct ib_pd *pd)
|
||||
{
|
||||
u32 cur_pid = current->tgid;
|
||||
struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
|
||||
int i, leftovers = 0;
|
||||
struct ipz_small_queue_page *page, *tmp;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
my_pd->ownpid != cur_pid) {
|
||||
ehca_err(pd->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (i = 0; i < 2; i++) {
|
||||
list_splice(&my_pd->full[i], &my_pd->free[i]);
|
||||
list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
|
||||
|
@ -43,9 +43,6 @@
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
|
||||
#include <asm/current.h>
|
||||
|
||||
#include "ehca_classes.h"
|
||||
#include "ehca_tools.h"
|
||||
#include "ehca_qes.h"
|
||||
@ -424,6 +421,9 @@ static struct ehca_qp *internal_create_qp(
|
||||
u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
|
||||
unsigned long flags;
|
||||
|
||||
if (init_attr->create_flags)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
memset(&parms, 0, sizeof(parms));
|
||||
qp_type = init_attr->qp_type;
|
||||
|
||||
@ -1526,16 +1526,6 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
|
||||
struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
|
||||
ib_device);
|
||||
struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
|
||||
struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
|
||||
ib_pd);
|
||||
u32 cur_pid = current->tgid;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
my_pd->ownpid != cur_pid) {
|
||||
ehca_err(ibqp->pd->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* The if-block below caches qp_attr to be modified for GSI and SMI
|
||||
* qps during the initialization by ib_mad. When the respective port
|
||||
@ -1636,23 +1626,13 @@ int ehca_query_qp(struct ib_qp *qp,
|
||||
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
|
||||
{
|
||||
struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
|
||||
struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
|
||||
ib_pd);
|
||||
struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
|
||||
ib_device);
|
||||
struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
|
||||
struct hcp_modify_qp_control_block *qpcb;
|
||||
u32 cur_pid = current->tgid;
|
||||
int cnt, ret = 0;
|
||||
u64 h_ret;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
my_pd->ownpid != cur_pid) {
|
||||
ehca_err(qp->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
|
||||
ehca_err(qp->device, "Invalid attribute mask "
|
||||
"ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
|
||||
@ -1797,8 +1777,6 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
{
|
||||
struct ehca_qp *my_qp =
|
||||
container_of(ibsrq, struct ehca_qp, ib_srq);
|
||||
struct ehca_pd *my_pd =
|
||||
container_of(ibsrq->pd, struct ehca_pd, ib_pd);
|
||||
struct ehca_shca *shca =
|
||||
container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
|
||||
struct hcp_modify_qp_control_block *mqpcb;
|
||||
@ -1806,14 +1784,6 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
u64 h_ret;
|
||||
int ret = 0;
|
||||
|
||||
u32 cur_pid = current->tgid;
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
my_pd->ownpid != cur_pid) {
|
||||
ehca_err(ibsrq->pd->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
|
||||
if (!mqpcb) {
|
||||
ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
|
||||
@ -1864,22 +1834,13 @@ modify_srq_exit0:
|
||||
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
|
||||
{
|
||||
struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
|
||||
struct ehca_pd *my_pd = container_of(srq->pd, struct ehca_pd, ib_pd);
|
||||
struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
|
||||
ib_device);
|
||||
struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
|
||||
struct hcp_modify_qp_control_block *qpcb;
|
||||
u32 cur_pid = current->tgid;
|
||||
int ret = 0;
|
||||
u64 h_ret;
|
||||
|
||||
if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
|
||||
my_pd->ownpid != cur_pid) {
|
||||
ehca_err(srq->device, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
|
||||
if (!qpcb) {
|
||||
ehca_err(srq->device, "Out of memory for qpcb "
|
||||
@ -1919,7 +1880,6 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
|
||||
struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
|
||||
ib_pd);
|
||||
struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
|
||||
u32 cur_pid = current->tgid;
|
||||
u32 qp_num = my_qp->real_qp_num;
|
||||
int ret;
|
||||
u64 h_ret;
|
||||
@ -1934,11 +1894,6 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
|
||||
"user space qp_num=%x", qp_num);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (my_pd->ownpid != cur_pid) {
|
||||
ehca_err(dev, "Invalid caller pid=%x ownpid=%x",
|
||||
cur_pid, my_pd->ownpid);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (my_qp->send_cq) {
|
||||
|
@ -188,7 +188,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
/* this might not work as long as HW does not support it */
wqe_p->immediate_data = be32_to_cpu(send_wr->imm_data);
wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
}

@ -73,37 +73,37 @@ extern int ehca_debug_level;
if (unlikely(ehca_debug_level)) \
dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
"PU%04x EHCA_DBG:%s " format "\n", \
raw_smp_processor_id(), __FUNCTION__, \
raw_smp_processor_id(), __func__, \
## arg); \
} while (0)

#define ehca_info(ib_dev, format, arg...) \
dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
raw_smp_processor_id(), __FUNCTION__, ## arg)
raw_smp_processor_id(), __func__, ## arg)

#define ehca_warn(ib_dev, format, arg...) \
dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
raw_smp_processor_id(), __FUNCTION__, ## arg)
raw_smp_processor_id(), __func__, ## arg)

#define ehca_err(ib_dev, format, arg...) \
dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
raw_smp_processor_id(), __FUNCTION__, ## arg)
raw_smp_processor_id(), __func__, ## arg)

/* use this one only if no ib_dev available */
#define ehca_gen_dbg(format, arg...) \
do { \
if (unlikely(ehca_debug_level)) \
printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
raw_smp_processor_id(), __FUNCTION__, ## arg); \
raw_smp_processor_id(), __func__, ## arg); \
} while (0)

#define ehca_gen_warn(format, arg...) \
printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
raw_smp_processor_id(), __FUNCTION__, ## arg)
raw_smp_processor_id(), __func__, ## arg)

#define ehca_gen_err(format, arg...) \
printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
raw_smp_processor_id(), __FUNCTION__, ## arg)
raw_smp_processor_id(), __func__, ## arg)

/**
* ehca_dmp - printk a memory block, whose length is n*8 bytes.
@ -118,7 +118,7 @@ extern int ehca_debug_level;
for (x = 0; x < l; x += 16) { \
printk(KERN_INFO "EHCA_DMP:%s " format \
" adr=%p ofs=%04x %016lx %016lx\n", \
__FUNCTION__, ##args, deb, x, \
__func__, ##args, deb, x, \
*((u64 *)&deb[0]), *((u64 *)&deb[8])); \
deb += 16; \
} \

@ -40,8 +40,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/

#include <asm/current.h>

#include "ehca_classes.h"
#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
@ -253,11 +251,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
u32 idr_handle = fileoffset & 0x1FFFFFF;
u32 q_type = (fileoffset >> 27) & 0x1; /* CQ, QP,... */
u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
u32 cur_pid = current->tgid;
u32 ret;
struct ehca_cq *cq;
struct ehca_qp *qp;
struct ehca_pd *pd;
struct ib_uobject *uobject;

switch (q_type) {
@ -270,13 +266,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (!cq)
return -EINVAL;

if (cq->ownpid != cur_pid) {
ehca_err(cq->ib_cq.device,
"Invalid caller pid=%x ownpid=%x",
cur_pid, cq->ownpid);
return -ENOMEM;
}

if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
return -EINVAL;

@ -298,14 +287,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if (!qp)
return -EINVAL;

pd = container_of(qp->ib_qp.pd, struct ehca_pd, ib_pd);
if (pd->ownpid != cur_pid) {
ehca_err(qp->ib_qp.device,
"Invalid caller pid=%x ownpid=%x",
cur_pid, pd->ownpid);
return -ENOMEM;
}

uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
if (!uobject || uobject->context != context)
return -EINVAL;

@ -20,17 +20,20 @@ ib_ipath-y := \
ipath_qp.o \
ipath_rc.o \
ipath_ruc.o \
ipath_sdma.o \
ipath_srq.o \
ipath_stats.o \
ipath_sysfs.o \
ipath_uc.o \
ipath_ud.o \
ipath_user_pages.o \
ipath_user_sdma.o \
ipath_verbs_mcast.o \
ipath_verbs.o

ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba7220.o ipath_sd7220.o ipath_sd7220_img.o

ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o

57
drivers/infiniband/hw/ipath/ipath_7220.h
Normal file
@ -0,0 +1,57 @@
#ifndef _IPATH_7220_H
#define _IPATH_7220_H
/*
 * Copyright (c) 2007 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * This header file provides the declarations and common definitions
 * for (mostly) manipulation of the SerDes blocks within the IBA7220.
 * the functions declared should only be called from within other
 * 7220-related files such as ipath_iba7220.c or ipath_sd7220.c.
 */
int ipath_sd7220_presets(struct ipath_devdata *dd);
int ipath_sd7220_init(struct ipath_devdata *dd, int was_reset);
int ipath_sd7220_prog_ld(struct ipath_devdata *dd, int sdnum, u8 *img,
int len, int offset);
int ipath_sd7220_prog_vfy(struct ipath_devdata *dd, int sdnum, const u8 *img,
int len, int offset);
/*
 * Below used for sdnum parameter, selecting one of the two sections
 * used for PCIe, or the single SerDes used for IB, which is the
 * only one currently used
 */
#define IB_7220_SERDES 2

int ipath_sd7220_ib_load(struct ipath_devdata *dd);
int ipath_sd7220_ib_vfy(struct ipath_devdata *dd);

#endif /* _IPATH_7220_H */
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
@ -80,6 +80,8 @@
#define IPATH_IB_LINKDOWN_DISABLE 5
#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
#define IPATH_IB_LINK_NO_HRTBT 8 /* disable Heartbeat, e.g. for loopback */
#define IPATH_IB_LINK_HRTBT 9 /* enable heartbeat, normal, non-loopback */

/*
 * These 3 values (SDR and DDR may be ORed for auto-speed
@ -198,7 +200,8 @@ typedef enum _ipath_ureg {
#define IPATH_RUNTIME_FORCE_WC_ORDER 0x4
#define IPATH_RUNTIME_RCVHDR_COPY 0x8
#define IPATH_RUNTIME_MASTER 0x10
/* 0x20 and 0x40 are no longer used, but are reserved for ABI compatibility */
#define IPATH_RUNTIME_NODMA_RTAIL 0x80
#define IPATH_RUNTIME_SDMA 0x200
#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800

@ -444,8 +447,9 @@ struct ipath_user_info {
#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */

#define IPATH_CMD_MAX 29
/* 30 is unused */
#define IPATH_CMD_SDMA_INFLIGHT 31 /* sdma inflight counter request */
#define IPATH_CMD_SDMA_COMPLETE 32 /* sdma completion counter request */

/*
 * Poll types
@ -483,6 +487,17 @@ struct ipath_cmd {
union {
struct ipath_tid_info tid_info;
struct ipath_user_info user_info;

/*
 * address in userspace where we should put the sdma
 * inflight counter
 */
__u64 sdma_inflight;
/*
 * address in userspace where we should put the sdma
 * completion counter
 */
__u64 sdma_complete;
/* address in userspace of struct ipath_port_info to
write result to */
__u64 port_info;
@ -537,7 +552,7 @@ struct ipath_diag_pkt {

/* The second diag_pkt struct is the expanded version that allows
 * more control over the packet, specifically, by allowing a custom
 * pbc (+ extra) qword, so that special modes and deliberate
 * pbc (+ static rate) qword, so that special modes and deliberate
 * changes to CRCs can be used. The elements were also re-ordered
 * for better alignment and to avoid padding issues.
 */
@ -662,8 +677,12 @@ struct infinipath_counters {
#define INFINIPATH_RHF_LENGTH_SHIFT 0
#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
#define INFINIPATH_RHF_EGRINDEX_MASK 0x7FF
#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
#define INFINIPATH_RHF_SEQ_MASK 0xF
#define INFINIPATH_RHF_SEQ_SHIFT 0
#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
#define INFINIPATH_RHF_H_ICRCERR 0x80000000
#define INFINIPATH_RHF_H_VCRCERR 0x40000000
#define INFINIPATH_RHF_H_PARITYERR 0x20000000
@ -673,6 +692,8 @@ struct infinipath_counters {
#define INFINIPATH_RHF_H_TIDERR 0x02000000
#define INFINIPATH_RHF_H_MKERR 0x01000000
#define INFINIPATH_RHF_H_IBERR 0x00800000
#define INFINIPATH_RHF_H_ERR_MASK 0xFF800000
#define INFINIPATH_RHF_L_USE_EGR 0x80000000
#define INFINIPATH_RHF_L_SWA 0x00008000
#define INFINIPATH_RHF_L_SWB 0x00004000

@ -696,6 +717,7 @@ struct infinipath_counters {
/* SendPIO per-buffer control */
#define INFINIPATH_SP_TEST 0x40
#define INFINIPATH_SP_TESTEBP 0x20
#define INFINIPATH_SP_TRIGGER_SHIFT 15

/* SendPIOAvail bits */
#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
@ -762,6 +784,7 @@ struct ether_header {
#define IPATH_MSN_MASK 0xFFFFFF
#define IPATH_QPN_MASK 0xFFFFFF
#define IPATH_MULTICAST_LID_BASE 0xC000
#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
#define IPATH_MULTICAST_QPN 0xFFFFFF

/* Receive Header Queue: receive type (from infinipath) */
@ -781,7 +804,7 @@ struct ether_header {
 */
static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
{
return __le32_to_cpu(rbuf[1]);
return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
}

static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
@ -802,6 +825,23 @@ static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
& INFINIPATH_RHF_EGRINDEX_MASK;
}

static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
{
return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
& INFINIPATH_RHF_SEQ_MASK;
}

static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
{
return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
& INFINIPATH_RHF_HDRQ_OFFSET_MASK;
}

static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
{
return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
}

static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
{
return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)

@ -66,6 +66,7 @@
#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
#define __IPATH_LINKVERBDBG 0x200000 /* very verbose linkchange debug */

#else /* _IPATH_DEBUGGING */

@ -89,6 +90,7 @@
#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */
#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) packet dump on */
#define __IPATH_LINKVERBDBG 0x0 /* very verbose linkchange debug */

#endif /* _IPATH_DEBUGGING */
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
@ -330,13 +330,19 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
struct ipath_devdata *dd;
ssize_t ret = 0;
u64 val;
u32 l_state, lt_state; /* LinkState, LinkTrainingState */

if (count != sizeof(dp)) {
if (count < sizeof(odp)) {
ret = -EINVAL;
goto bail;
}

if (copy_from_user(&dp, data, sizeof(dp))) {
if (count == sizeof(dp)) {
if (copy_from_user(&dp, data, sizeof(dp))) {
ret = -EFAULT;
goto bail;
}
} else if (copy_from_user(&odp, data, sizeof(odp))) {
ret = -EFAULT;
goto bail;
}
@ -396,10 +402,17 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
ret = -ENODEV;
goto bail;
}
/* Check link state, but not if we have custom PBC */
val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
if (!dp.pbc_wd && val != IPATH_IBSTATE_INIT &&
val != IPATH_IBSTATE_ARM && val != IPATH_IBSTATE_ACTIVE) {
/*
 * Want to skip check for l_state if using custom PBC,
 * because we might be trying to force an SM packet out.
 * first-cut, skip _all_ state checking in that case.
 */
val = ipath_ib_state(dd, dd->ipath_lastibcstat);
lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
(val != dd->ib_init && val != dd->ib_arm &&
val != dd->ib_active))) {
ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
dd->ipath_unit, (unsigned long long) val);
ret = -EINVAL;
@ -431,15 +444,17 @@ static ssize_t ipath_diagpkt_write(struct file *fp,
goto bail;
}

piobuf = ipath_getpiobuf(dd, &pbufn);
plen >>= 2; /* in dwords */

piobuf = ipath_getpiobuf(dd, plen, &pbufn);
if (!piobuf) {
ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
dd->ipath_unit);
ret = -EBUSY;
goto bail;
}

plen >>= 2; /* in dwords */
/* disarm it just to be extra sure */
ipath_disarm_piobufs(dd, pbufn, 1);

if (ipath_debug & __IPATH_PKTDBG)
ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
File diff suppressed because it is too large
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -62,6 +62,33 @@
|
||||
* accessing eeprom contents from within the kernel, only via sysfs.
|
||||
*/
|
||||
|
||||
/* Added functionality for IBA7220-based cards */
|
||||
#define IPATH_EEPROM_DEV_V1 0xA0
|
||||
#define IPATH_EEPROM_DEV_V2 0xA2
|
||||
#define IPATH_TEMP_DEV 0x98
|
||||
#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
|
||||
#define IPATH_NO_DEV (0xFF)
|
||||
|
||||
/*
|
||||
* The number of I2C chains is proliferating. Table below brings
|
||||
* some order to the madness. The basic principle is that the
|
||||
* table is scanned from the top, and a "probe" is made to the
|
||||
* device probe_dev. If that succeeds, the chain is considered
|
||||
* to be of that type, and dd->i2c_chain_type is set to the index+1
|
||||
* of the entry.
|
||||
* The +1 is so static initialization can mean "unknown, do probe."
|
||||
*/
|
||||
static struct i2c_chain_desc {
|
||||
u8 probe_dev; /* If seen at probe, chain is this type */
|
||||
u8 eeprom_dev; /* Dev addr (if any) for EEPROM */
|
||||
u8 temp_dev; /* Dev Addr (if any) for Temp-sense */
|
||||
} i2c_chains[] = {
|
||||
{ IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
|
||||
{ IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
|
||||
{ IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
|
||||
{ IPATH_NO_DEV }
|
||||
};
|
||||
|
||||
enum i2c_type {
|
||||
i2c_line_scl = 0,
|
||||
i2c_line_sda
|
||||
@ -75,13 +102,6 @@ enum i2c_state {
|
||||
#define READ_CMD 1
|
||||
#define WRITE_CMD 0
|
||||
|
||||
static int eeprom_init;
|
||||
|
||||
/*
|
||||
* The gpioval manipulation really should be protected by spinlocks
|
||||
* or be converted to use atomic operations.
|
||||
*/
|
||||
|
||||
/**
|
||||
* i2c_gpio_set - set a GPIO line
|
||||
* @dd: the infinipath device
|
||||
@ -240,6 +260,27 @@ static int i2c_ackrcv(struct ipath_devdata *dd)
|
||||
return ack_received;
|
||||
}
|
||||
|
||||
/**
|
||||
* rd_byte - read a byte, leaving ACK, STOP, etc up to caller
|
||||
* @dd: the infinipath device
|
||||
*
|
||||
* Returns byte shifted out of device
|
||||
*/
|
||||
static int rd_byte(struct ipath_devdata *dd)
|
||||
{
|
||||
int bit_cntr, data;
|
||||
|
||||
data = 0;
|
||||
|
||||
for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
|
||||
data <<= 1;
|
||||
scl_out(dd, i2c_line_high);
|
||||
data |= sda_in(dd, 0);
|
||||
scl_out(dd, i2c_line_low);
|
||||
}
|
||||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* wr_byte - write a byte, one bit at a time
|
||||
* @dd: the infinipath device
|
||||
@ -331,7 +372,6 @@ static int eeprom_reset(struct ipath_devdata *dd)
|
||||
ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
|
||||
"is %llx\n", (unsigned long long) *gpioval);
|
||||
|
||||
eeprom_init = 1;
|
||||
/*
|
||||
* This is to get the i2c into a known state, by first going low,
|
||||
* then tristate sda (and then tristate scl as first thing
|
||||
@ -340,12 +380,17 @@ static int eeprom_reset(struct ipath_devdata *dd)
|
||||
scl_out(dd, i2c_line_low);
|
||||
sda_out(dd, i2c_line_high);
|
||||
|
||||
/* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
|
||||
while (clock_cycles_left--) {
|
||||
scl_out(dd, i2c_line_high);
|
||||
|
||||
/* SDA seen high, issue START by dropping it while SCL high */
|
||||
if (sda_in(dd, 0)) {
|
||||
sda_out(dd, i2c_line_low);
|
||||
scl_out(dd, i2c_line_low);
|
||||
/* ATMEL spec says must be followed by STOP. */
|
||||
scl_out(dd, i2c_line_high);
|
||||
sda_out(dd, i2c_line_high);
|
||||
ret = 0;
|
||||
goto bail;
|
||||
}
|
||||
@ -359,29 +404,121 @@ bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ipath_eeprom_read - receives bytes from the eeprom via I2C
|
||||
* @dd: the infinipath device
|
||||
* @eeprom_offset: address to read from
|
||||
* @buffer: where to store result
|
||||
* @len: number of bytes to receive
|
||||
/*
|
||||
* Probe for I2C device at specified address. Returns 0 for "success"
|
||||
* to match rest of this file.
|
||||
* Leave bus in "reasonable" state for further commands.
|
||||
*/
|
||||
static int i2c_probe(struct ipath_devdata *dd, int devaddr)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = eeprom_reset(dd);
|
||||
if (ret) {
|
||||
ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
|
||||
devaddr);
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* Reset no longer leaves bus in start condition, so normal
|
||||
* i2c_startcmd() will do.
|
||||
*/
|
||||
ret = i2c_startcmd(dd, devaddr | READ_CMD);
|
||||
if (ret)
|
||||
ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
|
||||
devaddr);
|
||||
else {
|
||||
/*
|
||||
* Device did respond. Complete a single-byte read, because some
|
||||
* devices apparently cannot handle STOP immediately after they
|
||||
* ACK the start-cmd.
|
||||
*/
|
||||
int data;
|
||||
data = rd_byte(dd);
|
||||
stop_cmd(dd);
|
||||
ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns the "i2c type". This is a pointer to a struct that describes
|
||||
* the I2C chain on this board. To minimize impact on struct ipath_devdata,
|
||||
* the (small integer) index into the table is actually memoized, rather
|
||||
* then the pointer.
|
||||
* Memoization is because the type is determined on the first call per chip.
|
||||
* An alternative would be to move type determination to early
|
||||
* init code.
|
||||
*/
|
||||
static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
|
||||
{
|
||||
int idx;
|
||||
|
||||
/* Get memoized index, from previous successful probes */
|
||||
idx = dd->ipath_i2c_chain_type - 1;
|
||||
if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
|
||||
goto done;
|
||||
|
||||
idx = 0;
|
||||
while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
|
||||
/* if probe succeeds, this is type */
|
||||
if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
|
||||
break;
|
||||
++idx;
|
||||
}
|
||||
|
||||
/*
|
||||
* Old EEPROM (first entry) may require a reset after probe,
|
||||
* rather than being able to "start" after "stop"
|
||||
*/
|
||||
if (idx == 0)
|
||||
eeprom_reset(dd);
|
||||
|
||||
if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
|
||||
idx = -1;
|
||||
else
|
||||
dd->ipath_i2c_chain_type = idx + 1;
|
||||
done:
|
||||
return (idx >= 0) ? i2c_chains + idx : NULL;
|
||||
}
|
||||
|
||||
static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
|
||||
u8 eeprom_offset, void *buffer, int len)
|
||||
{
|
||||
/* compiler complains unless initialized */
|
||||
u8 single_byte = 0;
|
||||
int bit_cntr;
|
||||
int ret;
|
||||
struct i2c_chain_desc *icd;
|
||||
u8 *bp = buffer;
|
||||
|
||||
if (!eeprom_init)
|
||||
eeprom_reset(dd);
|
||||
ret = 1;
|
||||
icd = ipath_i2c_type(dd);
|
||||
if (!icd)
|
||||
goto bail;
|
||||
|
||||
eeprom_offset = (eeprom_offset << 1) | READ_CMD;
|
||||
|
||||
if (i2c_startcmd(dd, eeprom_offset)) {
|
||||
ipath_dbg("Failed startcmd\n");
|
||||
if (icd->eeprom_dev == IPATH_NO_DEV) {
|
||||
/* legacy not-really-I2C */
|
||||
ipath_cdbg(VERBOSE, "Start command only address\n");
|
||||
eeprom_offset = (eeprom_offset << 1) | READ_CMD;
|
||||
ret = i2c_startcmd(dd, eeprom_offset);
|
||||
} else {
|
||||
/* Actual I2C */
|
||||
ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
|
||||
if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
|
||||
ipath_dbg("Failed EEPROM startcmd\n");
|
||||
stop_cmd(dd);
|
||||
ret = 1;
|
||||
goto bail;
|
||||
}
|
||||
ret = wr_byte(dd, eeprom_offset);
|
||||
stop_cmd(dd);
|
||||
if (ret) {
|
||||
ipath_dev_err(dd, "Failed to write EEPROM address\n");
|
||||
ret = 1;
|
||||
goto bail;
|
||||
}
|
||||
ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
|
||||
}
|
||||
if (ret) {
|
||||
ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
|
||||
stop_cmd(dd);
|
||||
ret = 1;
|
||||
goto bail;
|
||||
@ -392,22 +529,11 @@ static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
|
||||
* incrementing the address.
|
||||
*/
|
||||
while (len-- > 0) {
|
||||
/* get data */
|
||||
single_byte = 0;
|
||||
for (bit_cntr = 8; bit_cntr; bit_cntr--) {
|
||||
u8 bit;
|
||||
scl_out(dd, i2c_line_high);
|
||||
bit = sda_in(dd, 0);
|
||||
single_byte |= bit << (bit_cntr - 1);
|
||||
scl_out(dd, i2c_line_low);
|
||||
}
|
||||
|
||||
/* get and store data */
|
||||
*bp++ = rd_byte(dd);
|
||||
/* send ack if not the last byte */
|
||||
if (len)
|
||||
send_ack(dd);
|
||||
|
||||
*((u8 *) buffer) = single_byte;
|
||||
buffer++;
|
||||
}
|
||||
|
||||
stop_cmd(dd);
|
||||
@ -418,31 +544,40 @@ bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ipath_eeprom_write - writes data to the eeprom via I2C
|
||||
* @dd: the infinipath device
|
||||
* @eeprom_offset: where to place data
|
||||
* @buffer: data to write
|
||||
* @len: number of bytes to write
|
||||
*/
|
||||
static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
|
||||
const void *buffer, int len)
|
||||
{
|
||||
u8 single_byte;
|
||||
int sub_len;
|
||||
const u8 *bp = buffer;
|
||||
int max_wait_time, i;
|
||||
int ret;
|
||||
struct i2c_chain_desc *icd;
|
||||
|
||||
if (!eeprom_init)
|
||||
eeprom_reset(dd);
|
||||
ret = 1;
|
||||
icd = ipath_i2c_type(dd);
|
||||
if (!icd)
|
||||
goto bail;
|
||||
|
||||
while (len > 0) {
|
||||
if (i2c_startcmd(dd, (eeprom_offset << 1) | WRITE_CMD)) {
|
||||
ipath_dbg("Failed to start cmd offset %u\n",
|
||||
eeprom_offset);
|
||||
goto failed_write;
|
||||
if (icd->eeprom_dev == IPATH_NO_DEV) {
|
||||
if (i2c_startcmd(dd,
|
||||
(eeprom_offset << 1) | WRITE_CMD)) {
|
||||
ipath_dbg("Failed to start cmd offset %u\n",
|
||||
eeprom_offset);
|
||||
goto failed_write;
|
||||
}
|
||||
} else {
|
||||
/* Real I2C */
|
||||
if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
|
||||
ipath_dbg("Failed EEPROM startcmd\n");
|
||||
goto failed_write;
|
||||
}
|
||||
ret = wr_byte(dd, eeprom_offset);
|
||||
if (ret) {
|
||||
ipath_dev_err(dd, "Failed to write EEPROM "
|
||||
"address\n");
|
||||
goto failed_write;
|
||||
}
|
||||
}
|
||||
|
||||
sub_len = min(len, 4);
|
||||
@ -468,9 +603,11 @@ static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offse
|
||||
* the writes have completed. We do this inline to avoid
|
||||
* the debug prints that are in the real read routine
|
||||
* if the startcmd fails.
|
||||
* We also use the proper device address, so it doesn't matter
|
||||
* whether we have real eeprom_dev. legacy likes any address.
|
||||
*/
|
||||
max_wait_time = 100;
|
||||
while (i2c_startcmd(dd, READ_CMD)) {
|
||||
while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
|
||||
stop_cmd(dd);
|
||||
if (!--max_wait_time) {
|
||||
ipath_dbg("Did not get successful read to "
|
||||
@ -478,15 +615,8 @@ static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offse
|
||||
goto failed_write;
|
||||
}
|
||||
}
|
||||
/* now read the zero byte */
|
||||
for (i = single_byte = 0; i < 8; i++) {
|
||||
u8 bit;
|
||||
scl_out(dd, i2c_line_high);
|
||||
bit = sda_in(dd, 0);
|
||||
scl_out(dd, i2c_line_low);
|
||||
single_byte <<= 1;
|
||||
single_byte |= bit;
|
||||
}
|
||||
/* now read (and ignore) the resulting byte */
|
||||
rd_byte(dd);
|
||||
stop_cmd(dd);
|
||||
}
|
||||
|
||||
@ -501,9 +631,12 @@ bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The public entry-points ipath_eeprom_read() and ipath_eeprom_write()
|
||||
* are now just wrappers around the internal functions.
|
||||
/**
|
||||
* ipath_eeprom_read - receives bytes from the eeprom via I2C
|
||||
* @dd: the infinipath device
|
||||
* @eeprom_offset: address to read from
|
||||
* @buffer: where to store result
|
||||
* @len: number of bytes to receive
|
||||
*/
|
||||
int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
|
||||
void *buff, int len)
|
||||
@ -519,6 +652,13 @@ int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ipath_eeprom_write - writes data to the eeprom via I2C
|
||||
* @dd: the infinipath device
|
||||
* @eeprom_offset: where to place data
|
||||
* @buffer: data to write
|
||||
* @len: number of bytes to write
|
||||
*/
|
||||
int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
|
||||
const void *buff, int len)
|
||||
{
|
||||
@ -820,7 +960,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
|
||||
* if we log an hour at 31 minutes, then we would need to set
|
||||
* active_time to -29 to accurately count the _next_ hour.
|
||||
*/
|
||||
if (new_time > 3600) {
|
||||
if (new_time >= 3600) {
|
||||
new_hrs = new_time / 3600;
|
||||
atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
|
||||
new_hrs += dd->ipath_eep_hrs;
|
||||
@ -885,3 +1025,159 @@ void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
|
||||
spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
|
||||
{
|
||||
int ret;
|
||||
struct i2c_chain_desc *icd;
|
||||
|
||||
ret = -ENOENT;
|
||||
|
||||
icd = ipath_i2c_type(dd);
|
||||
if (!icd)
|
||||
goto bail;
|
||||
|
||||
if (icd->temp_dev == IPATH_NO_DEV) {
|
||||
/* tempsense only exists on new, real-I2C boards */
|
||||
ret = -ENXIO;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
|
||||
ipath_dbg("Failed tempsense startcmd\n");
|
||||
stop_cmd(dd);
|
||||
ret = -ENXIO;
|
||||
goto bail;
|
||||
}
|
||||
ret = wr_byte(dd, regnum);
|
||||
stop_cmd(dd);
|
||||
if (ret) {
|
||||
ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
|
||||
regnum);
|
||||
ret = -ENXIO;
|
||||
goto bail;
|
||||
}
|
||||
if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
|
||||
ipath_dbg("Failed tempsense RD startcmd\n");
|
||||
stop_cmd(dd);
|
||||
ret = -ENXIO;
|
||||
goto bail;
|
||||
}
|
||||
/*
|
||||
* We can only clock out one byte per command, sensibly
|
||||
*/
|
||||
ret = rd_byte(dd);
|
||||
stop_cmd(dd);
|
||||
|
||||
bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define VALID_TS_RD_REG_MASK 0xBF
|
||||
|
||||
/**
|
||||
* ipath_tempsense_read - read register of temp sensor via I2C
|
||||
* @dd: the infinipath device
|
||||
* @regnum: register to read from
|
||||
*
|
||||
* returns reg contents (0..255) or < 0 for error
|
||||
*/
|
||||
int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (regnum > 7)
|
||||
return -EINVAL;
|
||||
|
||||
/* return a bogus value for (the one) register we do not have */
|
||||
if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
|
||||
return 0;
|
||||
|
||||
ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
|
||||
if (!ret) {
|
||||
ret = ipath_tempsense_internal_read(dd, regnum);
|
||||
mutex_unlock(&dd->ipath_eep_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* There are three possibilities here:
|
||||
* ret is actual value (0..255)
|
||||
* ret is -ENXIO or -EINVAL from code in this file
|
||||
* ret is -EINTR from mutex_lock_interruptible.
|
||||
*/
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
|
||||
u8 regnum, u8 data)
|
||||
{
|
||||
int ret = -ENOENT;
|
||||
struct i2c_chain_desc *icd;
|
||||
|
||||
icd = ipath_i2c_type(dd);
|
||||
if (!icd)
|
||||
goto bail;
|
||||
|
||||
if (icd->temp_dev == IPATH_NO_DEV) {
|
||||
/* tempsense only exists on new, real-I2C boards */
|
||||
ret = -ENXIO;
|
||||
goto bail;
|
||||
}
|
||||
if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
|
||||
ipath_dbg("Failed tempsense startcmd\n");
|
||||
stop_cmd(dd);
|
||||
ret = -ENXIO;
|
||||
goto bail;
|
||||
}
|
||||
ret = wr_byte(dd, regnum);
|
||||
if (ret) {
|
||||
stop_cmd(dd);
|
||||
ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
|
||||
regnum);
|
||||
ret = -ENXIO;
|
||||
goto bail;
|
||||
}
|
||||
ret = wr_byte(dd, data);
|
||||
stop_cmd(dd);
|
||||
ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
|
||||
if (ret) {
|
||||
ipath_dev_err(dd, "Failed tempsense data wrt to %02X\n",
|
||||
regnum);
|
||||
ret = -ENXIO;
|
||||
}
|
||||
|
||||
bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
|
||||
|
||||
/**
|
||||
* ipath_tempsense_write - write register of temp sensor via I2C
|
||||
* @dd: the infinipath device
|
||||
* @regnum: register to write
|
||||
* @data: data to write
|
||||
*
|
||||
* returns 0 for success or < 0 for error
|
||||
*/
|
||||
int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
|
||||
return -EINVAL;
|
||||
|
||||
ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
|
||||
if (!ret) {
|
||||
ret = ipath_tempsense_internal_write(dd, regnum, data);
|
||||
mutex_unlock(&dd->ipath_eep_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* There are three possibilities here:
|
||||
* ret is 0 for success
|
||||
* ret is -ENXIO or -EINVAL from code in this file
|
||||
* ret is -EINTR from mutex_lock_interruptible.
|
||||
*/
|
||||
return ret;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -36,21 +36,28 @@
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#include "ipath_kernel.h"
|
||||
#include "ipath_common.h"
|
||||
#include "ipath_user_sdma.h"
|
||||
|
||||
static int ipath_open(struct inode *, struct file *);
|
||||
static int ipath_close(struct inode *, struct file *);
|
||||
static ssize_t ipath_write(struct file *, const char __user *, size_t,
|
||||
loff_t *);
|
||||
static ssize_t ipath_writev(struct kiocb *, const struct iovec *,
|
||||
unsigned long , loff_t);
|
||||
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
|
||||
static int ipath_mmap(struct file *, struct vm_area_struct *);
|
||||
|
||||
static const struct file_operations ipath_file_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.write = ipath_write,
|
||||
.aio_write = ipath_writev,
|
||||
.open = ipath_open,
|
||||
.release = ipath_close,
|
||||
.poll = ipath_poll,
|
||||
@ -184,6 +191,29 @@ static int ipath_get_base_info(struct file *fp,
|
||||
kinfo->spi_piobufbase = (u64) pd->port_piobufs +
|
||||
dd->ipath_palign * kinfo->spi_piocnt * slave;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the PIO avail update threshold to no larger
|
||||
* than the number of buffers per process. Note that
|
||||
* we decrease it here, but won't ever increase it.
|
||||
*/
|
||||
if (dd->ipath_pioupd_thresh &&
|
||||
kinfo->spi_piocnt < dd->ipath_pioupd_thresh) {
|
||||
unsigned long flags;
|
||||
|
||||
dd->ipath_pioupd_thresh = kinfo->spi_piocnt;
|
||||
ipath_dbg("Decreased pio update threshold to %u\n",
|
||||
dd->ipath_pioupd_thresh);
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
|
||||
<< INFINIPATH_S_UPDTHRESH_SHIFT);
|
||||
dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
|
||||
<< INFINIPATH_S_UPDTHRESH_SHIFT;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
dd->ipath_sendctrl);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
}
|
||||
|
||||
if (shared) {
|
||||
kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
|
||||
dd->ipath_ureg_align * pd->port_port;
|
||||
@ -219,7 +249,12 @@ static int ipath_get_base_info(struct file *fp,
|
||||
kinfo->spi_pioalign = dd->ipath_palign;
|
||||
|
||||
kinfo->spi_qpair = IPATH_KD_QP;
|
||||
kinfo->spi_piosize = dd->ipath_ibmaxlen;
|
||||
/*
|
||||
* user mode PIO buffers are always 2KB, even when 4KB can
|
||||
* be received, and sent via the kernel; this is ibmaxlen
|
||||
* for 2K MTU.
|
||||
*/
|
||||
kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
|
||||
kinfo->spi_mtu = dd->ipath_ibmaxlen; /* maxlen, not ibmtu */
|
||||
kinfo->spi_port = pd->port_port;
|
||||
kinfo->spi_subport = subport_fp(fp);
|
||||
@ -1598,6 +1633,9 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
|
||||
port_fp(fp) = pd;
|
||||
pd->port_pid = current->pid;
|
||||
strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
|
||||
ipath_chg_pioavailkernel(dd,
|
||||
dd->ipath_pbufsport * (pd->port_port - 1),
|
||||
dd->ipath_pbufsport, 0);
|
||||
ipath_stats.sps_ports++;
|
||||
ret = 0;
|
||||
} else
|
||||
@ -1760,7 +1798,7 @@ static int find_shared_port(struct file *fp,
|
||||
for (ndev = 0; ndev < devmax; ndev++) {
|
||||
struct ipath_devdata *dd = ipath_lookup(ndev);
|
||||
|
||||
if (!dd)
|
||||
if (!usable(dd))
|
||||
continue;
|
||||
for (i = 1; i < dd->ipath_cfgports; i++) {
|
||||
struct ipath_portdata *pd = dd->ipath_pd[i];
|
||||
@ -1839,10 +1877,9 @@ static int ipath_assign_port(struct file *fp,
|
||||
if (ipath_compatible_subports(swmajor, swminor) &&
|
||||
uinfo->spu_subport_cnt &&
|
||||
(ret = find_shared_port(fp, uinfo))) {
|
||||
mutex_unlock(&ipath_mutex);
|
||||
if (ret > 0)
|
||||
ret = 0;
|
||||
goto done;
|
||||
goto done_chk_sdma;
|
||||
}
|
||||
|
||||
i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
|
||||
@ -1854,6 +1891,21 @@ static int ipath_assign_port(struct file *fp,
|
||||
else
|
||||
ret = find_best_unit(fp, uinfo);
|
||||
|
||||
done_chk_sdma:
|
||||
if (!ret) {
|
||||
struct ipath_filedata *fd = fp->private_data;
|
||||
const struct ipath_portdata *pd = fd->pd;
|
||||
const struct ipath_devdata *dd = pd->port_dd;
|
||||
|
||||
fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
|
||||
dd->ipath_unit,
|
||||
pd->port_port,
|
||||
fd->subport);
|
||||
|
||||
if (!fd->pq)
|
||||
ret = -ENOMEM;
|
||||
}
|
||||
|
||||
mutex_unlock(&ipath_mutex);
|
||||
|
||||
done:
|
||||
@ -1922,22 +1974,25 @@ static int ipath_do_user_init(struct file *fp,
|
||||
pd->port_hdrqfull_poll = pd->port_hdrqfull;
|
||||
|
||||
/*
|
||||
* now enable the port; the tail registers will be written to memory
|
||||
* by the chip as soon as it sees the write to
|
||||
* dd->ipath_kregs->kr_rcvctrl. The update only happens on
|
||||
* transition from 0 to 1, so clear it first, then set it as part of
|
||||
* enabling the port. This will (very briefly) affect any other
|
||||
* open ports, but it shouldn't be long enough to be an issue.
|
||||
* We explictly set the in-memory copy to 0 beforehand, so we don't
|
||||
* have to wait to be sure the DMA update has happened.
|
||||
* Now enable the port for receive.
|
||||
* For chips that are set to DMA the tail register to memory
|
||||
* when they change (and when the update bit transitions from
|
||||
* 0 to 1. So for those chips, we turn it off and then back on.
|
||||
* This will (very briefly) affect any other open ports, but the
|
||||
* duration is very short, and therefore isn't an issue. We
|
||||
* explictly set the in-memory tail copy to 0 beforehand, so we
|
||||
* don't have to wait to be sure the DMA update has happened
|
||||
* (chip resets head/tail to 0 on transition to enable).
|
||||
*/
|
||||
if (pd->port_rcvhdrtail_kvaddr)
|
||||
ipath_clear_rcvhdrtail(pd);
|
||||
set_bit(dd->ipath_r_portenable_shift + pd->port_port,
|
||||
&dd->ipath_rcvctrl);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
|
||||
if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
|
||||
if (pd->port_rcvhdrtail_kvaddr)
|
||||
ipath_clear_rcvhdrtail(pd);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
|
||||
dd->ipath_rcvctrl &
|
||||
~(1ULL << dd->ipath_r_tailupd_shift));
|
||||
}
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
|
||||
dd->ipath_rcvctrl);
|
||||
/* Notify any waiting slaves */
|
||||
@ -1965,14 +2020,15 @@ static void unlock_expected_tids(struct ipath_portdata *pd)
|
||||
ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
|
||||
pd->port_port);
|
||||
for (i = port_tidbase; i < maxtid; i++) {
|
||||
if (!dd->ipath_pageshadow[i])
|
||||
struct page *ps = dd->ipath_pageshadow[i];
|
||||
|
||||
if (!ps)
|
||||
continue;
|
||||
|
||||
dd->ipath_pageshadow[i] = NULL;
|
||||
pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
|
||||
PAGE_SIZE, PCI_DMA_FROMDEVICE);
|
||||
ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
|
||||
1);
|
||||
dd->ipath_pageshadow[i] = NULL;
|
||||
ipath_release_user_pages_on_close(&ps, 1);
|
||||
cnt++;
|
||||
ipath_stats.sps_pageunlocks++;
|
||||
}
|
||||
@ -2007,6 +2063,13 @@ static int ipath_close(struct inode *in, struct file *fp)
|
||||
mutex_unlock(&ipath_mutex);
|
||||
goto bail;
|
||||
}
|
||||
|
||||
dd = pd->port_dd;
|
||||
|
||||
/* drain user sdma queue */
|
||||
ipath_user_sdma_queue_drain(dd, fd->pq);
|
||||
ipath_user_sdma_queue_destroy(fd->pq);
|
||||
|
||||
if (--pd->port_cnt) {
|
||||
/*
|
||||
* XXX If the master closes the port before the slave(s),
|
||||
@ -2019,7 +2082,6 @@ static int ipath_close(struct inode *in, struct file *fp)
|
||||
goto bail;
|
||||
}
|
||||
port = pd->port_port;
|
||||
dd = pd->port_dd;
|
||||
|
||||
if (pd->port_hdrqfull) {
|
||||
ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
|
||||
@ -2039,7 +2101,7 @@ static int ipath_close(struct inode *in, struct file *fp)
|
||||
pd->port_rcvnowait = pd->port_pionowait = 0;
|
||||
}
|
||||
if (pd->port_flag) {
|
||||
ipath_dbg("port %u port_flag still set to 0x%lx\n",
|
||||
ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
|
||||
pd->port_port, pd->port_flag);
|
||||
pd->port_flag = 0;
|
||||
}
|
||||
@ -2076,6 +2138,7 @@ static int ipath_close(struct inode *in, struct file *fp)
|
||||
|
||||
i = dd->ipath_pbufsport * (port - 1);
|
||||
ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);
|
||||
ipath_chg_pioavailkernel(dd, i, dd->ipath_pbufsport, 1);
|
||||
|
||||
dd->ipath_f_clear_tids(dd, pd->port_port);
|
||||
|
||||
@ -2140,17 +2203,31 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
|
||||
static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
|
||||
u32 __user *inflightp)
|
||||
{
|
||||
unsigned long flags;
|
||||
const u32 val = ipath_user_sdma_inflight_counter(pq);
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
if (put_user(val, inflightp))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ipath_sdma_get_complete(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
u32 __user *completep)
|
||||
{
|
||||
u32 val;
|
||||
int err;
|
||||
|
||||
err = ipath_user_sdma_make_progress(dd, pq);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
val = ipath_user_sdma_complete_counter(pq);
|
||||
if (put_user(val, completep))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -2229,6 +2306,16 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
|
||||
dest = &cmd.cmd.armlaunch_ctrl;
|
||||
src = &ucmd->cmd.armlaunch_ctrl;
|
||||
break;
|
||||
case IPATH_CMD_SDMA_INFLIGHT:
|
||||
copy = sizeof(cmd.cmd.sdma_inflight);
|
||||
dest = &cmd.cmd.sdma_inflight;
|
||||
src = &ucmd->cmd.sdma_inflight;
|
||||
break;
|
||||
case IPATH_CMD_SDMA_COMPLETE:
|
||||
copy = sizeof(cmd.cmd.sdma_complete);
|
||||
dest = &cmd.cmd.sdma_complete;
|
||||
src = &ucmd->cmd.sdma_complete;
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
goto bail;
|
||||
@ -2299,7 +2386,7 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
|
||||
cmd.cmd.slave_mask_addr);
|
||||
break;
|
||||
case IPATH_CMD_PIOAVAILUPD:
|
||||
ret = ipath_force_pio_avail_update(pd->port_dd);
|
||||
ipath_force_pio_avail_update(pd->port_dd);
|
||||
break;
|
||||
case IPATH_CMD_POLL_TYPE:
|
||||
pd->poll_type = cmd.cmd.poll_type;
|
||||
@ -2310,6 +2397,17 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
|
||||
else
|
||||
ipath_disable_armlaunch(pd->port_dd);
|
||||
break;
|
||||
case IPATH_CMD_SDMA_INFLIGHT:
|
||||
ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
|
||||
(u32 __user *) (unsigned long)
|
||||
cmd.cmd.sdma_inflight);
|
||||
break;
|
||||
case IPATH_CMD_SDMA_COMPLETE:
|
||||
ret = ipath_sdma_get_complete(pd->port_dd,
|
||||
user_sdma_queue_fp(fp),
|
||||
(u32 __user *) (unsigned long)
|
||||
cmd.cmd.sdma_complete);
|
||||
break;
|
||||
}
|
||||
|
||||
if (ret >= 0)
|
||||
@ -2319,6 +2417,20 @@ bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
|
||||
unsigned long dim, loff_t off)
|
||||
{
|
||||
struct file *filp = iocb->ki_filp;
|
||||
struct ipath_filedata *fp = filp->private_data;
|
||||
struct ipath_portdata *pd = port_fp(filp);
|
||||
struct ipath_user_sdma_queue *pq = fp->pq;
|
||||
|
||||
if (!dim)
|
||||
return -EINVAL;
|
||||
|
||||
return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
|
||||
}
|
||||
|
||||
static struct class *ipath_class;
|
||||
|
||||
static int init_cdev(int minor, char *name, const struct file_operations *fops,
|
||||
|
@ -40,6 +40,7 @@
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/htirq.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
#include "ipath_kernel.h"
|
||||
#include "ipath_registers.h"
|
||||
@ -305,7 +306,9 @@ static const struct ipath_cregs ipath_ht_cregs = {
|
||||
|
||||
/* kr_intstatus, kr_intclear, kr_intmask bits */
|
||||
#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
|
||||
#define INFINIPATH_I_RCVURG_SHIFT 0
|
||||
#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
|
||||
#define INFINIPATH_I_RCVAVAIL_SHIFT 12
|
||||
|
||||
/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
|
||||
#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
|
||||
@ -476,7 +479,13 @@ static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
|
||||
#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
|
||||
<< INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
|
||||
|
||||
static int ipath_ht_txe_recover(struct ipath_devdata *);
|
||||
static void ipath_ht_txe_recover(struct ipath_devdata *dd)
|
||||
{
|
||||
++ipath_stats.sps_txeparity;
|
||||
dev_info(&dd->pcidev->dev,
|
||||
"Recovering from TXE PIO parity error\n");
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ipath_ht_handle_hwerrors - display hardware errors.
|
||||
@ -557,11 +566,11 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
|
||||
* occur if a processor speculative read is done to the PIO
|
||||
* buffer while we are sending a packet, for example.
|
||||
*/
|
||||
if ((hwerrs & TXE_PIO_PARITY) && ipath_ht_txe_recover(dd))
|
||||
if (hwerrs & TXE_PIO_PARITY) {
|
||||
ipath_ht_txe_recover(dd);
|
||||
hwerrs &= ~TXE_PIO_PARITY;
|
||||
if (hwerrs & RXE_EAGER_PARITY)
|
||||
ipath_dev_err(dd, "RXE parity, Eager TID error is not "
|
||||
"recoverable\n");
|
||||
}
|
||||
|
||||
if (!hwerrs) {
|
||||
ipath_dbg("Clearing freezemode on ignored or "
|
||||
"recovered hardware error\n");
|
||||
@ -735,11 +744,10 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
|
||||
*/
|
||||
dd->ipath_flags |= IPATH_32BITCOUNTERS;
|
||||
dd->ipath_flags |= IPATH_GPIO_INTR;
|
||||
if (dd->ipath_htspeed != 800)
|
||||
if (dd->ipath_lbus_speed != 800)
|
||||
ipath_dev_err(dd,
|
||||
"Incorrectly configured for HT @ %uMHz\n",
|
||||
dd->ipath_htspeed);
|
||||
ret = 0;
|
||||
dd->ipath_lbus_speed);
|
||||
|
||||
/*
|
||||
* set here, not in ipath_init_*_funcs because we have to do
|
||||
@ -839,7 +847,7 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
|
||||
/*
|
||||
* now write them back to clear the error.
|
||||
*/
|
||||
pci_write_config_byte(pdev, link_off,
|
||||
pci_write_config_word(pdev, link_off,
|
||||
linkctrl & (0xf << 8));
|
||||
}
|
||||
}
|
||||
@ -904,7 +912,7 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
|
||||
break;
|
||||
}
|
||||
|
||||
dd->ipath_htwidth = width;
|
||||
dd->ipath_lbus_width = width;
|
||||
|
||||
if (linkwidth != 0x11) {
|
||||
ipath_dev_err(dd, "Not configured for 16 bit HT "
|
||||
@ -952,8 +960,13 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
|
||||
speed = 200;
|
||||
break;
|
||||
}
|
||||
dd->ipath_htspeed = speed;
|
||||
dd->ipath_lbus_speed = speed;
|
||||
}
|
||||
|
||||
snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
|
||||
"HyperTransport,%uMHz,x%u\n",
|
||||
dd->ipath_lbus_speed,
|
||||
dd->ipath_lbus_width);
|
||||
}
|
||||
|
||||
static int ipath_ht_intconfig(struct ipath_devdata *dd)
|
||||
@ -1653,22 +1666,6 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
|
||||
}
|
||||
|
||||
|
||||
static int ipath_ht_txe_recover(struct ipath_devdata *dd)
|
||||
{
|
||||
int cnt = ++ipath_stats.sps_txeparity;
|
||||
if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) {
|
||||
if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
|
||||
ipath_dev_err(dd,
|
||||
"Too many attempts to recover from "
|
||||
"TXE parity, giving up\n");
|
||||
return 0;
|
||||
}
|
||||
dev_info(&dd->pcidev->dev,
|
||||
"Recovering from TXE PIO parity error\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ipath_init_ht_get_base_info - set chip-specific flags for user code
|
||||
* @dd: the infinipath device
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -38,7 +38,7 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
#include "ipath_kernel.h"
|
||||
#include "ipath_registers.h"
|
||||
@ -311,9 +311,14 @@ static const struct ipath_cregs ipath_pe_cregs = {
|
||||
.cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
|
||||
};
|
||||
|
||||
/* kr_control bits */
|
||||
#define INFINIPATH_C_RESET 1U
|
||||
|
||||
/* kr_intstatus, kr_intclear, kr_intmask bits */
|
||||
#define INFINIPATH_I_RCVURG_MASK ((1U<<5)-1)
|
||||
#define INFINIPATH_I_RCVURG_SHIFT 0
|
||||
#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<5)-1)
|
||||
#define INFINIPATH_I_RCVAVAIL_SHIFT 12
|
||||
|
||||
/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
|
||||
#define INFINIPATH_HWE_PCIEMEMPARITYERR_MASK 0x000000000000003fULL
|
||||
@ -338,6 +343,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
|
||||
#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
|
||||
#define INFINIPATH_EXTS_MEMBIST_FOUND 0x0000000000008000
|
||||
|
||||
/* kr_xgxsconfig bits */
|
||||
#define INFINIPATH_XGXS_RESET 0x5ULL
|
||||
|
||||
#define _IPATH_GPIO_SDA_NUM 1
|
||||
#define _IPATH_GPIO_SCL_NUM 0
|
||||
|
||||
@ -346,6 +354,16 @@ static const struct ipath_cregs ipath_pe_cregs = {
|
||||
#define IPATH_GPIO_SCL (1ULL << \
|
||||
(_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
|
||||
|
||||
#define INFINIPATH_RT_BUFSIZE_MASK 0xe0000000ULL
|
||||
#define INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid) \
|
||||
((((tid) & INFINIPATH_RT_BUFSIZE_MASK) >> 29) + 11 - 1)
|
||||
#define INFINIPATH_RT_BUFSIZE(tid) (1 << INFINIPATH_RT_BUFSIZE_SHIFTVAL(tid))
|
||||
#define INFINIPATH_RT_IS_VALID(tid) \
|
||||
(((tid) & INFINIPATH_RT_BUFSIZE_MASK) && \
|
||||
((((tid) & INFINIPATH_RT_BUFSIZE_MASK) != INFINIPATH_RT_BUFSIZE_MASK)))
|
||||
#define INFINIPATH_RT_ADDR_MASK 0x1FFFFFFFULL /* 29 bits valid */
|
||||
#define INFINIPATH_RT_ADDR_SHIFT 10
|
||||
|
||||
#define INFINIPATH_R_INTRAVAIL_SHIFT 16
|
||||
#define INFINIPATH_R_TAILUPD_SHIFT 31
|
||||
|
||||
@ -372,6 +390,8 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
|
||||
#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
|
||||
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
|
||||
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
|
||||
#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
|
||||
<< INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
|
||||
|
||||
static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
|
||||
u32, unsigned long);
|
||||
@ -450,10 +470,8 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
|
||||
* make sure we get this much out, unless told to be quiet,
|
||||
* or it's occurred within the last 5 seconds
|
||||
*/
|
||||
if ((hwerrs & ~(dd->ipath_lasthwerror |
|
||||
((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
|
||||
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
|
||||
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
|
||||
if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
|
||||
RXE_EAGER_PARITY)) ||
|
||||
(ipath_debug & __IPATH_VERBDBG))
|
||||
dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
|
||||
"(cleared)\n", (unsigned long long) hwerrs);
|
||||
@ -465,7 +483,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
|
||||
(hwerrs & ~dd->ipath_hwe_bitsextant));
|
||||
|
||||
ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
|
||||
if (ctrl & INFINIPATH_C_FREEZEMODE) {
|
||||
if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
|
||||
/*
|
||||
* parity errors in send memory are recoverable,
|
||||
* just cancel the send (if indicated in * sendbuffererror),
|
||||
@ -540,12 +558,40 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
|
||||
dd->ipath_hwerrmask);
|
||||
}
|
||||
|
||||
if (*msg)
|
||||
ipath_dev_err(dd, "%s hardware error\n", msg);
|
||||
if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
|
||||
if (hwerrs) {
|
||||
/*
|
||||
* for /sys status file ; if no trailing } is copied, we'll
|
||||
* know it was truncated.
|
||||
* if any set that we aren't ignoring; only
|
||||
* make the complaint once, in case it's stuck
|
||||
* or recurring, and we get here multiple
|
||||
* times.
|
||||
*/
|
||||
ipath_dev_err(dd, "%s hardware error\n", msg);
|
||||
if (dd->ipath_flags & IPATH_INITTED) {
|
||||
ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
|
||||
ipath_setup_pe_setextled(dd,
|
||||
INFINIPATH_IBCS_L_STATE_DOWN,
|
||||
INFINIPATH_IBCS_LT_STATE_DISABLED);
|
||||
ipath_dev_err(dd, "Fatal Hardware Error (freeze "
|
||||
"mode), no longer usable, SN %.16s\n",
|
||||
dd->ipath_serial);
|
||||
isfatal = 1;
|
||||
}
|
||||
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
|
||||
/* mark as having had error */
|
||||
*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
|
||||
/*
|
||||
* mark as not usable, at a minimum until driver
|
||||
* is reloaded, probably until reboot, since no
|
||||
* other reset is possible.
|
||||
*/
|
||||
dd->ipath_flags &= ~IPATH_INITTED;
|
||||
} else
|
||||
*msg = 0; /* recovered from all of them */
|
||||
|
||||
if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg && msg) {
|
||||
/*
|
||||
* for /sys status file ; if no trailing brace is copied,
|
||||
* we'll know it was truncated.
|
||||
*/
|
||||
snprintf(dd->ipath_freezemsg, dd->ipath_freezelen,
|
||||
"{%s}", msg);
|
||||
@ -610,7 +656,6 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
|
||||
dd->ipath_f_put_tid = ipath_pe_put_tid_2;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* set here, not in ipath_init_*_funcs because we have to do
|
||||
* it after we can read chip registers.
|
||||
@ -838,7 +883,7 @@ static void ipath_setup_pe_setextled(struct ipath_devdata *dd, u64 lst,
|
||||
extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
|
||||
INFINIPATH_EXTC_LED2PRIPORT_ON);
|
||||
|
||||
if (ltst & INFINIPATH_IBCS_LT_STATE_LINKUP)
|
||||
if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
|
||||
extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
|
||||
if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
|
||||
extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
|
||||
@ -863,6 +908,62 @@ static void ipath_setup_pe_cleanup(struct ipath_devdata *dd)
|
||||
pci_disable_msi(dd->pcidev);
|
||||
}
|
||||
|
||||
static void ipath_6120_pcie_params(struct ipath_devdata *dd)
|
||||
{
|
||||
u16 linkstat, speed;
|
||||
int pos;
|
||||
|
||||
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
|
||||
if (!pos) {
|
||||
ipath_dev_err(dd, "Can't find PCI Express capability!\n");
|
||||
goto bail;
|
||||
}
|
||||
|
||||
pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
|
||||
&linkstat);
|
||||
/*
|
||||
* speed is bits 0-4, linkwidth is bits 4-8
|
||||
* no defines for them in headers
|
||||
*/
|
||||
speed = linkstat & 0xf;
|
||||
linkstat >>= 4;
|
||||
linkstat &= 0x1f;
|
||||
dd->ipath_lbus_width = linkstat;
|
||||
|
||||
switch (speed) {
|
||||
case 1:
|
||||
dd->ipath_lbus_speed = 2500; /* Gen1, 2.5GHz */
|
||||
break;
|
||||
case 2:
|
||||
dd->ipath_lbus_speed = 5000; /* Gen1, 5GHz */
|
||||
break;
|
||||
default: /* not defined, assume gen1 */
|
||||
dd->ipath_lbus_speed = 2500;
|
||||
break;
|
||||
}
|
||||
|
||||
if (linkstat < 8)
|
||||
ipath_dev_err(dd,
|
||||
"PCIe width %u (x8 HCA), performance reduced\n",
|
||||
linkstat);
|
||||
else
|
||||
ipath_cdbg(VERBOSE, "PCIe speed %u width %u (x8 HCA)\n",
|
||||
dd->ipath_lbus_speed, linkstat);
|
||||
|
||||
if (speed != 1)
|
||||
ipath_dev_err(dd,
|
||||
"PCIe linkspeed %u is incorrect; "
|
||||
"should be 1 (2500)!\n", speed);
|
||||
bail:
|
||||
/* fill in string, even on errors */
|
||||
snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
|
||||
"PCIe,%uMHz,x%u\n",
|
||||
dd->ipath_lbus_speed,
|
||||
dd->ipath_lbus_width);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* ipath_setup_pe_config - setup PCIe config related stuff
|
||||
* @dd: the infinipath device
|
||||
@ -920,19 +1021,8 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
|
||||
} else
|
||||
ipath_dev_err(dd, "Can't find MSI capability, "
|
||||
"can't save MSI settings for reset\n");
|
||||
if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP))) {
|
||||
u16 linkstat;
|
||||
pci_read_config_word(dd->pcidev, pos + PCI_EXP_LNKSTA,
|
||||
&linkstat);
|
||||
linkstat >>= 4;
|
||||
linkstat &= 0x1f;
|
||||
if (linkstat != 8)
|
||||
ipath_dev_err(dd, "PCIe width %u, "
|
||||
"performance reduced\n", linkstat);
|
||||
}
|
||||
else
|
||||
ipath_dev_err(dd, "Can't find PCI Express "
|
||||
"capability!\n");
|
||||
|
||||
ipath_6120_pcie_params(dd);
|
||||
|
||||
dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
dd->ipath_link_speed_supported = IPATH_IB_SDR;
@@ -1065,10 +1155,7 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;

dd->ipath_eep_st_masks[2].errs_to_log =
INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;

dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
dd->delay_mult = 2; /* SDR, 4X, can't change */
}

@@ -1142,6 +1229,9 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
u64 val;
int i;
int ret;
u16 cmdval;

pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmdval);

/* Use ERROR so it shows up in logs, etc. */
ipath_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->ipath_unit);
@@ -1169,10 +1259,14 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
ipath_dev_err(dd, "rewrite of BAR1 failed: %d\n",
r);
/* now re-enable memory access */
pci_write_config_word(dd->pcidev, PCI_COMMAND, cmdval);
if ((r = pci_enable_device(dd->pcidev)))
ipath_dev_err(dd, "pci_enable_device failed after "
"reset: %d\n", r);
/* whether it worked or not, mark as present, again */
/*
* whether it fully enabled or not, mark as present,
* again (but not INITTED)
*/
dd->ipath_flags |= IPATH_PRESENT;
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
if (val == dd->ipath_revision) {
@@ -1190,6 +1284,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
ret = 0; /* failed */

bail:
if (ret)
ipath_6120_pcie_params(dd);
return ret;
}

@@ -1209,16 +1305,21 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
{
u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
unsigned long flags = 0; /* keep gcc quiet */
int tidx;
spinlock_t *tidlockp;

if (!dd->ipath_kregbase)
return;

if (pa != dd->ipath_tidinvalid) {
if (pa & ((1U << 11) - 1)) {
dev_info(&dd->pcidev->dev, "BUG: physaddr %lx "
"not 4KB aligned!\n", pa);
"not 2KB aligned!\n", pa);
return;
}
pa >>= 11;
/* paranoia check */
if (pa & (7<<29))
if (pa & ~INFINIPATH_RT_ADDR_MASK)
ipath_dev_err(dd,
"BUG: Physical page address 0x%lx "
"has bits set in 31-29\n", pa);
@@ -1238,14 +1339,22 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr,
* call can be done from interrupt level for the port 0 eager TIDs,
* so we have to use irqsave locks.
*/
spin_lock_irqsave(&dd->ipath_tid_lock, flags);
/*
* Assumes tidptr always > ipath_egrtidbase
* if type == RCVHQ_RCV_TYPE_EAGER.
*/
tidx = tidptr - dd->ipath_egrtidbase;

tidlockp = (type == RCVHQ_RCV_TYPE_EAGER && tidx < dd->ipath_rcvegrcnt)
? &dd->ipath_kernel_tid_lock : &dd->ipath_user_tid_lock;
spin_lock_irqsave(tidlockp, flags);
ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xfeeddeaf);
if (dd->ipath_kregbase)
writel(pa, tidp32);
writel(pa, tidp32);
ipath_write_kreg(dd, dd->ipath_kregs->kr_scratch, 0xdeadbeef);
mmiowb();
spin_unlock_irqrestore(&dd->ipath_tid_lock, flags);
spin_unlock_irqrestore(tidlockp, flags);
}

/**
* ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher
* @dd: the infinipath device
@@ -1261,6 +1370,10 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
u32 type, unsigned long pa)
{
u32 __iomem *tidp32 = (u32 __iomem *)tidptr;
u32 tidx;

if (!dd->ipath_kregbase)
return;

if (pa != dd->ipath_tidinvalid) {
if (pa & ((1U << 11) - 1)) {
@@ -1270,7 +1383,7 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
}
pa >>= 11;
/* paranoia check */
if (pa & (7<<29))
if (pa & ~INFINIPATH_RT_ADDR_MASK)
ipath_dev_err(dd,
"BUG: Physical page address 0x%lx "
"has bits set in 31-29\n", pa);
@@ -1280,8 +1393,8 @@ static void ipath_pe_put_tid_2(struct ipath_devdata *dd, u64 __iomem *tidptr,
else /* for now, always full 4KB page */
pa |= 2 << 29;
}
if (dd->ipath_kregbase)
writel(pa, tidp32);
tidx = tidptr - dd->ipath_egrtidbase;
writel(pa, tidp32);
mmiowb();
}

@@ -1379,17 +1492,13 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
dd->ipath_egrtidbase = (u64 __iomem *)
((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);

/*
* To truly support a 4KB MTU (for usermode), we need to
* bump this to a larger value. For now, we use them for
* the kernel only.
*/
dd->ipath_rcvegrbufsize = 2048;
dd->ipath_rcvegrbufsize = ipath_mtu4096 ? 4096 : 2048;
/*
* the min() check here is currently a nop, but it may not always
* be, depending on just how we do ipath_rcvegrbufsize
*/
dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
dd->ipath_ibmaxlen = min(ipath_mtu4096 ? dd->ipath_piosize4k :
dd->ipath_piosize2k,
dd->ipath_rcvegrbufsize +
(dd->ipath_rcvhdrentsize << 2));
dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
drivers/infiniband/hw/ipath/ipath_iba7220.c (new file, 2571 lines; diff suppressed because it is too large)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -155,24 +155,13 @@ static int bringup_link(struct ipath_devdata *dd)
dd->ipath_control);

/*
* Note that prior to try 14 or 15 of IB, the credit scaling
* wasn't working, because it was swapped for writes with the
* 1 bit default linkstate field
* set initial max size pkt IBC will send, including ICRC; it's the
* PIO buffer size in dwords, less 1; also see ipath_set_mtu()
*/
val = (dd->ipath_ibmaxlen >> 2) + 1;
ibc = val << dd->ibcc_mpl_shift;

/* ignore pbc and align word */
val = dd->ipath_piosize2k - 2 * sizeof(u32);
/*
* for ICRC, which we only send in diag test pkt mode, and we
* don't need to worry about that for mtu
*/
val += 1;
/*
* Set the IBC maxpktlength to the size of our pio buffers the
* maxpktlength is in words. This is *not* the IB data MTU.
*/
ibc = (val / sizeof(u32)) << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
/* in KB */
/* flowcontrolwatermark is in units of KBytes */
ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
/*
* How often flowctrl sent. More or less in usecs; balance against
@@ -191,10 +180,13 @@ static int bringup_link(struct ipath_devdata *dd)
/*
* Want to start out with both LINKCMD and LINKINITCMD in NOP
* (0 and 0). Don't put linkinitcmd in ipath_ibcctrl, want that
* to stay a NOP
* to stay a NOP. Flag that we are disabled, for the (unlikely)
* case that some recovery path is trying to bring the link up
* before we are ready.
*/
ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
INFINIPATH_IBCC_LINKINITCMD_SHIFT;
dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
(unsigned long long) ibc);
ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
@@ -227,17 +219,26 @@ static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
pd->port_cnt = 1;
/* The port 0 pkey table is used by the layer interface. */
pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
pd->port_seq_cnt = 1;
}
return pd;
}

static int init_chip_first(struct ipath_devdata *dd,
struct ipath_portdata **pdp)
static int init_chip_first(struct ipath_devdata *dd)
{
struct ipath_portdata *pd = NULL;
struct ipath_portdata *pd;
int ret = 0;
u64 val;

spin_lock_init(&dd->ipath_kernel_tid_lock);
spin_lock_init(&dd->ipath_user_tid_lock);
spin_lock_init(&dd->ipath_sendctrl_lock);
spin_lock_init(&dd->ipath_sdma_lock);
spin_lock_init(&dd->ipath_gpio_lock);
spin_lock_init(&dd->ipath_eep_st_lock);
spin_lock_init(&dd->ipath_sdepb_lock);
mutex_init(&dd->ipath_eep_lock);

/*
* skip cfgports stuff because we are not allocating memory,
* and we don't want problems if the portcnt changed due to
@@ -250,12 +251,14 @@ static int init_chip_first(struct ipath_devdata *dd,
else if (ipath_cfgports <= dd->ipath_portcnt) {
dd->ipath_cfgports = ipath_cfgports;
ipath_dbg("Configured to use %u ports out of %u in chip\n",
dd->ipath_cfgports, dd->ipath_portcnt);
dd->ipath_cfgports, ipath_read_kreg32(dd,
dd->ipath_kregs->kr_portcnt));
} else {
dd->ipath_cfgports = dd->ipath_portcnt;
ipath_dbg("Tried to configured to use %u ports; chip "
"only supports %u\n", ipath_cfgports,
dd->ipath_portcnt);
ipath_read_kreg32(dd,
dd->ipath_kregs->kr_portcnt));
}
/*
* Allocate full portcnt array, rather than just cfgports, because
@@ -295,12 +298,9 @@ static int init_chip_first(struct ipath_devdata *dd,
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
dd->ipath_piosize2k = val & ~0U;
dd->ipath_piosize4k = val >> 32;
/*
* Note: the chips support a maximum MTU of 4096, but the driver
* hasn't implemented this feature yet, so set the initial value
* to 2048.
*/
dd->ipath_ibmtu = 2048;
if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
ipath_mtu4096 = 0; /* 4KB not supported by this chip */
dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
dd->ipath_piobcnt2k = val & ~0U;
dd->ipath_piobcnt4k = val >> 32;
@@ -328,43 +328,46 @@ static int init_chip_first(struct ipath_devdata *dd,
else ipath_dbg("%u 2k piobufs @ %p\n",
dd->ipath_piobcnt2k, dd->ipath_pio2kbase);

spin_lock_init(&dd->ipath_tid_lock);
spin_lock_init(&dd->ipath_sendctrl_lock);
spin_lock_init(&dd->ipath_gpio_lock);
spin_lock_init(&dd->ipath_eep_st_lock);
mutex_init(&dd->ipath_eep_lock);

done:
*pdp = pd;
return ret;
}

/**
* init_chip_reset - re-initialize after a reset, or enable
* @dd: the infinipath device
* @pdp: output for port data
*
* sanity check at least some of the values after reset, and
* ensure no receive or transmit (explictly, in case reset
* failed
*/
static int init_chip_reset(struct ipath_devdata *dd,
struct ipath_portdata **pdp)
static int init_chip_reset(struct ipath_devdata *dd)
{
u32 rtmp;
int i;
unsigned long flags;

*pdp = dd->ipath_pd[0];
/* ensure chip does no sends or receives while we re-initialize */
dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
/*
* ensure chip does no sends or receives, tail updates, or
* pioavail updates while we re-initialize
*/
dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
for (i = 0; i < dd->ipath_portcnt; i++) {
clear_bit(dd->ipath_r_portenable_shift + i,
&dd->ipath_rcvctrl);
clear_bit(dd->ipath_r_intravail_shift + i,
&dd->ipath_rcvctrl);
}
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);

spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
dd->ipath_sendctrl = 0U; /* no sdma, etc */
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);

rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
|
||||
if (dd->ipath_portcnt != rtmp)
|
||||
dev_info(&dd->pcidev->dev, "portcnt was %u before "
|
||||
"reset, now %u, using original\n",
|
||||
dd->ipath_portcnt, rtmp);
|
||||
rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
|
||||
if (rtmp != dd->ipath_rcvtidcnt)
|
||||
dev_info(&dd->pcidev->dev, "tidcnt was %u before "
|
||||
@ -467,10 +470,10 @@ static void init_shadow_tids(struct ipath_devdata *dd)
|
||||
dd->ipath_physshadow = addrs;
|
||||
}
|
||||
|
||||
static void enable_chip(struct ipath_devdata *dd,
|
||||
struct ipath_portdata *pd, int reinit)
|
||||
static void enable_chip(struct ipath_devdata *dd, int reinit)
|
||||
{
|
||||
u32 val;
|
||||
u64 rcvmask;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
@ -484,17 +487,28 @@ static void enable_chip(struct ipath_devdata *dd,
|
||||
/* Enable PIO send, and update of PIOavail regs to memory. */
|
||||
dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
|
||||
INFINIPATH_S_PIOBUFAVAILUPD;
|
||||
|
||||
/*
|
||||
* Set the PIO avail update threshold to host memory
|
||||
* on chips that support it.
|
||||
*/
|
||||
if (dd->ipath_pioupd_thresh)
|
||||
dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
|
||||
<< INFINIPATH_S_UPDTHRESH_SHIFT;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
|
||||
/*
|
||||
* enable port 0 receive, and receive interrupt. other ports
|
||||
* done as user opens and inits them.
|
||||
* Enable kernel ports' receive and receive interrupt.
|
||||
* Other ports done as user opens and inits them.
|
||||
*/
|
||||
dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
|
||||
(1ULL << dd->ipath_r_portenable_shift) |
|
||||
(1ULL << dd->ipath_r_intravail_shift);
|
||||
rcvmask = 1ULL;
|
||||
dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
|
||||
(rcvmask << dd->ipath_r_intravail_shift);
|
||||
if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
|
||||
dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
|
||||
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
|
||||
dd->ipath_rcvctrl);
|
||||
|
||||
@ -505,16 +519,16 @@ static void enable_chip(struct ipath_devdata *dd,
|
||||
dd->ipath_flags |= IPATH_INITTED;
|
||||
|
||||
/*
|
||||
* init our shadow copies of head from tail values, and write
|
||||
* head values to match.
|
||||
* Init our shadow copies of head from tail values,
|
||||
* and write head values to match.
|
||||
*/
|
||||
val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
|
||||
(void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
|
||||
ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
|
||||
|
||||
/* Initialize so we interrupt on next packet received */
|
||||
(void)ipath_write_ureg(dd, ur_rcvhdrhead,
|
||||
dd->ipath_rhdrhead_intr_off |
|
||||
dd->ipath_pd[0]->port_head, 0);
|
||||
ipath_write_ureg(dd, ur_rcvhdrhead,
|
||||
dd->ipath_rhdrhead_intr_off |
|
||||
dd->ipath_pd[0]->port_head, 0);
|
||||
|
||||
/*
|
||||
* by now pioavail updates to memory should have occurred, so
|
||||
@ -523,25 +537,26 @@ static void enable_chip(struct ipath_devdata *dd,
|
||||
* initial values of the generation bit correct.
|
||||
*/
|
||||
for (i = 0; i < dd->ipath_pioavregs; i++) {
|
||||
__le64 val;
|
||||
__le64 pioavail;
|
||||
|
||||
/*
|
||||
* Chip Errata bug 6641; even and odd qwords>3 are swapped.
|
||||
*/
|
||||
if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
|
||||
val = dd->ipath_pioavailregs_dma[i ^ 1];
|
||||
pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
|
||||
else
|
||||
val = dd->ipath_pioavailregs_dma[i];
|
||||
dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
|
||||
pioavail = dd->ipath_pioavailregs_dma[i];
|
||||
dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail) |
|
||||
(~dd->ipath_pioavailkernel[i] <<
|
||||
INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
|
||||
}
|
||||
/* can get counters, stats, etc. */
|
||||
dd->ipath_flags |= IPATH_PRESENT;
|
||||
}
|
||||
|
||||
static int init_housekeeping(struct ipath_devdata *dd,
|
||||
struct ipath_portdata **pdp, int reinit)
|
||||
static int init_housekeeping(struct ipath_devdata *dd, int reinit)
|
||||
{
|
||||
char boardn[32];
|
||||
char boardn[40];
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
@ -600,18 +615,9 @@ static int init_housekeeping(struct ipath_devdata *dd,
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
|
||||
INFINIPATH_E_RESET);
|
||||
|
||||
if (reinit)
|
||||
ret = init_chip_reset(dd, pdp);
|
||||
else
|
||||
ret = init_chip_first(dd, pdp);
|
||||
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
|
||||
"%u egrtids\n", (unsigned long long) dd->ipath_revision,
|
||||
dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
|
||||
dd->ipath_rcvegrcnt);
|
||||
ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
|
||||
(unsigned long long) dd->ipath_revision,
|
||||
dd->ipath_pcirev);
|
||||
|
||||
if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
|
||||
INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
|
||||
@ -650,10 +656,39 @@ static int init_housekeeping(struct ipath_devdata *dd,
|
||||
|
||||
ipath_dbg("%s", dd->ipath_boardversion);
|
||||
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
if (reinit)
|
||||
ret = init_chip_reset(dd);
|
||||
else
|
||||
ret = init_chip_first(dd);
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void verify_interrupt(unsigned long opaque)
|
||||
{
|
||||
struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
|
||||
|
||||
if (!dd)
|
||||
return; /* being torn down */
|
||||
|
||||
/*
|
||||
* If we don't have any interrupts, let the user know and
|
||||
* don't bother checking again.
|
||||
*/
|
||||
if (dd->ipath_int_counter == 0) {
|
||||
if (!dd->ipath_f_intr_fallback(dd))
|
||||
dev_err(&dd->pcidev->dev, "No interrupts detected, "
|
||||
"not usable.\n");
|
||||
else /* re-arm the timer to see if fallback works */
|
||||
mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
|
||||
} else
|
||||
ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
|
||||
dd->ipath_int_counter);
|
||||
}
|
||||
|
||||
/**
|
||||
* ipath_init_chip - do the actual initialization sequence on the chip
|
||||
@ -676,11 +711,11 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
||||
u32 val32, kpiobufs;
|
||||
u32 piobufs, uports;
|
||||
u64 val;
|
||||
struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
|
||||
struct ipath_portdata *pd;
|
||||
gfp_t gfp_flags = GFP_USER | __GFP_COMP;
|
||||
unsigned long flags;
|
||||
|
||||
ret = init_housekeeping(dd, &pd, reinit);
|
||||
ret = init_housekeeping(dd, reinit);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
@ -700,7 +735,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
||||
* we now use routines that backend onto __get_free_pages, the
|
||||
* rest would be wasted.
|
||||
*/
|
||||
dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt;
|
||||
dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
|
||||
dd->ipath_rcvhdrcnt);
|
||||
|
||||
@ -731,8 +766,8 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
||||
if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
|
||||
int i = (int) piobufs -
|
||||
(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
|
||||
if (i < 0)
|
||||
i = 0;
|
||||
if (i < 1)
|
||||
i = 1;
|
||||
dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
|
||||
"%d for kernel leaves too few for %d user ports "
|
||||
"(%d each); using %u\n", kpiobufs,
|
||||
@ -751,24 +786,40 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
||||
ipath_dbg("allocating %u pbufs/port leaves %u unused, "
|
||||
"add to kernel\n", dd->ipath_pbufsport, val32);
|
||||
dd->ipath_lastport_piobuf -= val32;
|
||||
kpiobufs += val32;
|
||||
ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
|
||||
dd->ipath_pbufsport, val32);
|
||||
}
|
||||
dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
|
||||
dd->ipath_lastpioindex = 0;
|
||||
dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
|
||||
ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
|
||||
ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
|
||||
"each for %u user ports\n", kpiobufs,
|
||||
piobufs, dd->ipath_pbufsport, uports);
|
||||
if (dd->ipath_pioupd_thresh) {
|
||||
if (dd->ipath_pbufsport < dd->ipath_pioupd_thresh)
|
||||
dd->ipath_pioupd_thresh = dd->ipath_pbufsport;
|
||||
if (kpiobufs < dd->ipath_pioupd_thresh)
|
||||
dd->ipath_pioupd_thresh = kpiobufs;
|
||||
}
|
||||
|
||||
ret = dd->ipath_f_early_init(dd);
|
||||
if (ret) {
|
||||
ipath_dev_err(dd, "Early initialization failure\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
dd->ipath_f_early_init(dd);
|
||||
/*
|
||||
* cancel any possible active sends from early driver load.
|
||||
* Cancel any possible active sends from early driver load.
|
||||
* Follows early_init because some chips have to initialize
|
||||
* PIO buffers in early_init to avoid false parity errors.
|
||||
*/
|
||||
ipath_cancel_sends(dd, 0);
|
||||
|
||||
/* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
|
||||
* done after early_init */
|
||||
/*
|
||||
* Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
|
||||
* done after early_init.
|
||||
*/
|
||||
dd->ipath_hdrqlast =
|
||||
dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
|
||||
@ -783,8 +834,8 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
||||
goto done;
|
||||
}
|
||||
|
||||
(void)ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
|
||||
dd->ipath_pioavailregs_phys);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
|
||||
dd->ipath_pioavailregs_phys);
|
||||
/*
|
||||
* this is to detect s/w errors, which the h/w works around by
|
||||
* ignoring the low 6 bits of address, if it wasn't aligned.
|
||||
@ -843,58 +894,65 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
||||
/* enable errors that are masked, at least this first time. */
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
|
||||
~dd->ipath_maskederrs);
|
||||
dd->ipath_errormask = ipath_read_kreg64(dd,
|
||||
dd->ipath_kregs->kr_errormask);
|
||||
dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
|
||||
dd->ipath_errormask =
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
|
||||
/* clear any interrupts up to this point (ints still not enabled) */
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
|
||||
|
||||
dd->ipath_f_tidtemplate(dd);
|
||||
|
||||
/*
|
||||
* Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
|
||||
* re-init, the simplest way to handle this is to free
|
||||
* existing, and re-allocate.
|
||||
* Need to re-create rest of port 0 portdata as well.
|
||||
*/
|
||||
pd = dd->ipath_pd[0];
|
||||
if (reinit) {
|
||||
/* Alloc and init new ipath_portdata for port0,
|
||||
struct ipath_portdata *npd;
|
||||
|
||||
/*
|
||||
* Alloc and init new ipath_portdata for port0,
|
||||
* Then free old pd. Could lead to fragmentation, but also
|
||||
* makes later support for hot-swap easier.
|
||||
*/
|
||||
struct ipath_portdata *npd;
|
||||
npd = create_portdata0(dd);
|
||||
if (npd) {
|
||||
ipath_free_pddata(dd, pd);
|
||||
dd->ipath_pd[0] = pd = npd;
|
||||
dd->ipath_pd[0] = npd;
|
||||
pd = npd;
|
||||
} else {
|
||||
ipath_dev_err(dd, "Unable to allocate portdata for"
|
||||
" port 0, failing\n");
|
||||
ipath_dev_err(dd, "Unable to allocate portdata"
|
||||
" for port 0, failing\n");
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
dd->ipath_f_tidtemplate(dd);
|
||||
ret = ipath_create_rcvhdrq(dd, pd);
|
||||
if (!ret) {
|
||||
dd->ipath_hdrqtailptr =
|
||||
(volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
|
||||
if (!ret)
|
||||
ret = create_port0_egr(dd);
|
||||
}
|
||||
if (ret)
|
||||
ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
|
||||
if (ret) {
|
||||
ipath_dev_err(dd, "failed to allocate kernel port's "
|
||||
"rcvhdrq and/or egr bufs\n");
|
||||
goto done;
|
||||
}
|
||||
else
|
||||
enable_chip(dd, pd, reinit);
|
||||
enable_chip(dd, reinit);
|
||||
|
||||
|
||||
if (!ret && !reinit) {
|
||||
/* used when we close a port, for DMA already in flight at close */
|
||||
if (!reinit) {
|
||||
/*
|
||||
* Used when we close a port, for DMA already in flight
|
||||
* at close.
|
||||
*/
|
||||
dd->ipath_dummy_hdrq = dma_alloc_coherent(
|
||||
&dd->pcidev->dev, pd->port_rcvhdrq_size,
|
||||
&dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
|
||||
&dd->ipath_dummy_hdrq_phys,
|
||||
gfp_flags);
|
||||
if (!dd->ipath_dummy_hdrq ) {
|
||||
if (!dd->ipath_dummy_hdrq) {
|
||||
dev_info(&dd->pcidev->dev,
|
||||
"Couldn't allocate 0x%lx bytes for dummy hdrq\n",
|
||||
pd->port_rcvhdrq_size);
|
||||
dd->ipath_pd[0]->port_rcvhdrq_size);
|
||||
/* fallback to just 0'ing */
|
||||
dd->ipath_dummy_hdrq_phys = 0UL;
|
||||
}
|
||||
@ -906,7 +964,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
||||
*/
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
|
||||
|
||||
if(!dd->ipath_stats_timer_active) {
|
||||
if (!dd->ipath_stats_timer_active) {
|
||||
/*
|
||||
* first init, or after an admin disable/enable
|
||||
* set up stats retrieval timer, even if we had errors
|
||||
@ -922,6 +980,16 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
|
||||
dd->ipath_stats_timer_active = 1;
|
||||
}
|
||||
|
||||
/* Set up SendDMA if chip supports it */
|
||||
if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
|
||||
ret = setup_sdma(dd);
|
||||
|
||||
/* Set up HoL state */
|
||||
init_timer(&dd->ipath_hol_timer);
|
||||
dd->ipath_hol_timer.function = ipath_hol_event;
|
||||
dd->ipath_hol_timer.data = (unsigned long)dd;
|
||||
dd->ipath_hol_state = IPATH_HOL_UP;
|
||||
|
||||
done:
|
||||
if (!ret) {
|
||||
*dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
|
||||
@ -934,6 +1002,20 @@ done:
|
||||
0ULL);
|
||||
/* chip is usable; mark it as initialized */
|
||||
*dd->ipath_statusp |= IPATH_STATUS_INITTED;
|
||||
|
||||
/*
|
||||
* setup to verify we get an interrupt, and fallback
|
||||
* to an alternate if necessary and possible
|
||||
*/
|
||||
if (!reinit) {
|
||||
init_timer(&dd->ipath_intrchk_timer);
|
||||
dd->ipath_intrchk_timer.function =
|
||||
verify_interrupt;
|
||||
dd->ipath_intrchk_timer.data =
|
||||
(unsigned long) dd;
|
||||
}
|
||||
dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
|
||||
add_timer(&dd->ipath_intrchk_timer);
|
||||
} else
|
||||
ipath_dev_err(dd, "No interrupts enabled, couldn't "
|
||||
"setup interrupt address\n");
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -32,6 +32,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "ipath_kernel.h"
|
||||
#include "ipath_verbs.h"
|
||||
@ -59,9 +60,11 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
|
||||
dev_info(&dd->pcidev->dev,
|
||||
"Rewrite PIO buffer %u, to recover from parity error\n",
|
||||
pnum);
|
||||
*pbuf = dwcnt+1; /* no flush required, since already in freeze */
|
||||
while(--dwcnt)
|
||||
*pbuf++ = 0;
|
||||
|
||||
/* no flush required, since already in freeze */
|
||||
writel(dwcnt + 1, pbuf);
|
||||
while (--dwcnt)
|
||||
writel(0, pbuf++);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -70,7 +73,7 @@ static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
|
||||
* If rewrite is true, and bits are set in the sendbufferror registers,
|
||||
* we'll write to the buffer, for error recovery on parity errors.
|
||||
*/
|
||||
static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
|
||||
void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
|
||||
{
|
||||
u32 piobcnt;
|
||||
unsigned long sbuf[4];
|
||||
@ -84,12 +87,14 @@ static void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
|
||||
dd, dd->ipath_kregs->kr_sendbuffererror);
|
||||
sbuf[1] = ipath_read_kreg64(
|
||||
dd, dd->ipath_kregs->kr_sendbuffererror + 1);
|
||||
if (piobcnt > 128) {
|
||||
if (piobcnt > 128)
|
||||
sbuf[2] = ipath_read_kreg64(
|
||||
dd, dd->ipath_kregs->kr_sendbuffererror + 2);
|
||||
if (piobcnt > 192)
|
||||
sbuf[3] = ipath_read_kreg64(
|
||||
dd, dd->ipath_kregs->kr_sendbuffererror + 3);
|
||||
}
|
||||
else
|
||||
sbuf[3] = 0;
|
||||
|
||||
if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
|
||||
int i;
|
||||
@ -254,24 +259,20 @@ void ipath_format_hwerrors(u64 hwerrs,
|
||||
}
|
||||
|
||||
/* return the strings for the most common link states */
|
||||
static char *ib_linkstate(u32 linkstate)
|
||||
static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
|
||||
{
|
||||
char *ret;
|
||||
u32 state;
|
||||
|
||||
switch (linkstate) {
|
||||
case IPATH_IBSTATE_INIT:
|
||||
state = ipath_ib_state(dd, ibcs);
|
||||
if (state == dd->ib_init)
|
||||
ret = "Init";
|
||||
break;
|
||||
case IPATH_IBSTATE_ARM:
|
||||
else if (state == dd->ib_arm)
|
||||
ret = "Arm";
|
||||
break;
|
||||
case IPATH_IBSTATE_ACTIVE:
|
||||
else if (state == dd->ib_active)
|
||||
ret = "Active";
|
||||
break;
|
||||
default:
|
||||
else
|
||||
ret = "Down";
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -286,103 +287,172 @@ void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
|
||||
}
|
||||
|
||||
static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
|
||||
ipath_err_t errs, int noprint)
|
||||
ipath_err_t errs)
|
||||
{
|
||||
u64 val;
|
||||
u32 ltstate, lstate;
|
||||
u32 ltstate, lstate, ibstate, lastlstate;
|
||||
u32 init = dd->ib_init;
|
||||
u32 arm = dd->ib_arm;
|
||||
u32 active = dd->ib_active;
|
||||
const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
|
||||
|
||||
lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
|
||||
ibstate = ipath_ib_state(dd, ibcs);
|
||||
/* linkstate at last interrupt */
|
||||
lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
|
||||
ltstate = ipath_ib_linktrstate(dd, ibcs); /* linktrainingtate */
|
||||
|
||||
/*
|
||||
* even if diags are enabled, we want to notice LINKINIT, etc.
|
||||
* We just don't want to change the LED state, or
|
||||
* dd->ipath_kregs->kr_ibcctrl
|
||||
* Since going into a recovery state causes the link state to go
|
||||
* down and since recovery is transitory, it is better if we "miss"
|
||||
* ever seeing the link training state go into recovery (i.e.,
|
||||
* ignore this transition for link state special handling purposes)
|
||||
* without even updating ipath_lastibcstat.
|
||||
*/
|
||||
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
|
||||
lstate = val & IPATH_IBSTATE_MASK;
|
||||
if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
|
||||
(ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
|
||||
(ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
|
||||
goto done;
|
||||
|
||||
/*
|
||||
* this is confusing enough when it happens that I want to always put it
|
||||
* on the console and in the logs. If it was a requested state change,
|
||||
* we'll have already cleared the flags, so we won't print this warning
|
||||
* if linkstate transitions into INIT from any of the various down
|
||||
* states, or if it transitions from any of the up (INIT or better)
|
||||
* states into any of the down states (except link recovery), then
|
||||
* call the chip-specific code to take appropriate actions.
|
||||
*/
|
||||
if ((lstate != IPATH_IBSTATE_ARM && lstate != IPATH_IBSTATE_ACTIVE)
|
||||
&& (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
|
||||
dev_info(&dd->pcidev->dev, "Link state changed from %s to %s\n",
|
||||
(dd->ipath_flags & IPATH_LINKARMED) ? "ARM" : "ACTIVE",
|
||||
ib_linkstate(lstate));
|
||||
/*
|
||||
* Flush all queued sends when link went to DOWN or INIT,
|
||||
* to be sure that they don't block SMA and other MAD packets
|
||||
*/
|
||||
ipath_cancel_sends(dd, 1);
|
||||
}
|
||||
else if (lstate == IPATH_IBSTATE_INIT || lstate == IPATH_IBSTATE_ARM ||
|
||||
lstate == IPATH_IBSTATE_ACTIVE) {
|
||||
/*
|
||||
* only print at SMA if there is a change, debug if not
|
||||
* (sometimes we want to know that, usually not).
|
||||
*/
|
||||
if (lstate == ((unsigned) dd->ipath_lastibcstat
|
||||
& IPATH_IBSTATE_MASK)) {
|
||||
ipath_dbg("Status change intr but no change (%s)\n",
|
||||
ib_linkstate(lstate));
|
||||
if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
|
||||
lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
|
||||
/* transitioned to UP */
|
||||
if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
|
||||
/* link came up, so we must no longer be disabled */
|
||||
dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
|
||||
ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
|
||||
goto skip_ibchange; /* chip-code handled */
|
||||
}
|
||||
} else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
|
||||
(dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
|
||||
ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
|
||||
ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
|
||||
int handled;
|
||||
handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
|
||||
dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
|
||||
if (handled) {
|
||||
ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
|
||||
goto skip_ibchange; /* chip-code handled */
|
||||
}
|
||||
else
|
||||
ipath_cdbg(VERBOSE, "Unit %u link state %s, last "
|
||||
"was %s\n", dd->ipath_unit,
|
||||
ib_linkstate(lstate),
|
||||
ib_linkstate((unsigned)
|
||||
dd->ipath_lastibcstat
|
||||
& IPATH_IBSTATE_MASK));
|
||||
}
|
||||
else {
|
||||
lstate = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
|
||||
if (lstate == IPATH_IBSTATE_INIT ||
|
||||
lstate == IPATH_IBSTATE_ARM ||
|
||||
lstate == IPATH_IBSTATE_ACTIVE)
|
||||
ipath_cdbg(VERBOSE, "Unit %u link state down"
|
||||
" (state 0x%x), from %s\n",
|
||||
dd->ipath_unit,
|
||||
(u32)val & IPATH_IBSTATE_MASK,
|
||||
ib_linkstate(lstate));
|
||||
else
|
||||
ipath_cdbg(VERBOSE, "Unit %u link state changed "
|
||||
"to 0x%x from down (%x)\n",
|
||||
dd->ipath_unit, (u32) val, lstate);
|
||||
|
||||
/*
|
||||
* Significant enough to always print and get into logs, if it was
|
||||
* unexpected. If it was a requested state change, we'll have
|
||||
* already cleared the flags, so we won't print this warning
|
||||
*/
|
||||
if ((ibstate != arm && ibstate != active) &&
|
||||
(dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
|
||||
dev_info(&dd->pcidev->dev, "Link state changed from %s "
|
||||
"to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
|
||||
"ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
|
||||
}
|
||||
ltstate = (val >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
|
||||
INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
|
||||
lstate = (val >> INFINIPATH_IBCS_LINKSTATE_SHIFT) &
|
||||
INFINIPATH_IBCS_LINKSTATE_MASK;
|
||||
|
||||
if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
|
||||
ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
|
||||
u32 last_ltstate;
|
||||
|
||||
u32 lastlts;
|
||||
lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
|
||||
/*
|
||||
* Ignore cycling back and forth from Polling.Active
|
||||
* to Polling.Quiet while waiting for the other end of
|
||||
* the link to come up. We will cycle back and forth
|
||||
* between them if no cable is plugged in,
|
||||
* the other device is powered off or disabled, etc.
|
||||
* Ignore cycling back and forth from Polling.Active to
|
||||
* Polling.Quiet while waiting for the other end of the link
|
||||
* to come up, except to try and decide if we are connected
|
||||
* to a live IB device or not. We will cycle back and
|
||||
* forth between them if no cable is plugged in, the other
|
||||
* device is powered off or disabled, etc.
|
||||
*/
|
||||
last_ltstate = (dd->ipath_lastibcstat >>
|
||||
INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT)
|
||||
& INFINIPATH_IBCS_LINKTRAININGSTATE_MASK;
|
||||
if (last_ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE
|
||||
|| last_ltstate ==
|
||||
INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
|
||||
if (dd->ipath_ibpollcnt > 40) {
|
||||
if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
|
||||
lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
|
||||
if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
|
||||
(++dd->ipath_ibpollcnt == 40)) {
|
||||
dd->ipath_flags |= IPATH_NOCABLE;
|
||||
*dd->ipath_statusp |=
|
||||
IPATH_STATUS_IB_NOCABLE;
|
||||
} else
|
||||
dd->ipath_ibpollcnt++;
|
||||
ipath_cdbg(LINKVERB, "Set NOCABLE\n");
|
||||
}
|
||||
ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
|
||||
ipath_ibcstatus_str[ltstate], ibstate);
|
||||
goto skip_ibchange;
|
||||
}
|
||||
}
|
||||
dd->ipath_ibpollcnt = 0; /* some state other than 2 or 3 */
|
||||
|
||||
dd->ipath_ibpollcnt = 0; /* not poll*, now */
|
||||
ipath_stats.sps_iblink++;
|
||||
if (ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
|
||||
|
||||
if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
|
||||
u64 linkrecov;
|
||||
linkrecov = ipath_snap_cntr(dd,
|
||||
dd->ipath_cregs->cr_iblinkerrrecovcnt);
|
||||
if (linkrecov != dd->ipath_lastlinkrecov) {
|
||||
ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
|
||||
ibcs, ib_linkstate(dd, ibcs),
|
||||
ipath_ibcstatus_str[ltstate],
|
||||
linkrecov);
|
||||
/* and no more until active again */
|
||||
dd->ipath_lastlinkrecov = 0;
|
||||
ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
|
||||
goto skip_ibchange;
|
||||
}
|
||||
}
|
||||
|
||||
if (ibstate == init || ibstate == arm || ibstate == active) {
|
||||
*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
|
||||
if (ibstate == init || ibstate == arm) {
|
||||
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
|
||||
if (dd->ipath_flags & IPATH_LINKACTIVE)
|
||||
signal_ib_event(dd, IB_EVENT_PORT_ERR);
|
||||
}
|
||||
if (ibstate == arm) {
|
||||
dd->ipath_flags |= IPATH_LINKARMED;
|
||||
dd->ipath_flags &= ~(IPATH_LINKUNK |
|
||||
IPATH_LINKINIT | IPATH_LINKDOWN |
|
||||
IPATH_LINKACTIVE | IPATH_NOCABLE);
|
||||
ipath_hol_down(dd);
|
||||
} else if (ibstate == init) {
|
||||
/*
|
||||
* set INIT and DOWN. Down is checked by
|
||||
* most of the other code, but INIT is
|
||||
* useful to know in a few places.
|
||||
*/
|
||||
dd->ipath_flags |= IPATH_LINKINIT |
|
||||
IPATH_LINKDOWN;
|
||||
dd->ipath_flags &= ~(IPATH_LINKUNK |
|
||||
IPATH_LINKARMED | IPATH_LINKACTIVE |
|
||||
IPATH_NOCABLE);
|
||||
ipath_hol_down(dd);
|
||||
} else { /* active */
|
||||
dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
|
||||
dd->ipath_cregs->cr_iblinkerrrecovcnt);
|
||||
*dd->ipath_statusp |=
|
||||
IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
|
||||
dd->ipath_flags |= IPATH_LINKACTIVE;
|
||||
dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
|
||||
| IPATH_LINKDOWN | IPATH_LINKARMED |
|
||||
IPATH_NOCABLE);
|
||||
if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
|
||||
ipath_restart_sdma(dd);
|
||||
signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
|
||||
/* LED active not handled in chip _f_updown */
|
||||
dd->ipath_f_setextled(dd, lstate, ltstate);
|
||||
ipath_hol_up(dd);
|
||||
}
|
||||
|
||||
/*
|
||||
* print after we've already done the work, so as not to
|
||||
* delay the state changes and notifications, for debugging
|
||||
*/
|
||||
if (lstate == lastlstate)
|
||||
ipath_cdbg(LINKVERB, "Unchanged from last: %s "
|
||||
"(%x)\n", ib_linkstate(dd, ibcs), ibstate);
|
||||
else
|
||||
ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
|
||||
dd->ipath_unit, ib_linkstate(dd, ibcs),
|
||||
ipath_ibcstatus_str[ltstate], ibstate);
|
||||
} else { /* down */
|
||||
if (dd->ipath_flags & IPATH_LINKACTIVE)
|
||||
signal_ib_event(dd, IB_EVENT_PORT_ERR);
|
||||
dd->ipath_flags |= IPATH_LINKDOWN;
|
||||
@ -391,69 +461,28 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
|
||||
IPATH_LINKARMED);
|
||||
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
|
||||
dd->ipath_lli_counter = 0;
|
||||
if (!noprint) {
|
||||
if (((dd->ipath_lastibcstat >>
|
||||
INFINIPATH_IBCS_LINKSTATE_SHIFT) &
|
||||
INFINIPATH_IBCS_LINKSTATE_MASK)
|
||||
== INFINIPATH_IBCS_L_STATE_ACTIVE)
|
||||
/* if from up to down be more vocal */
|
||||
ipath_cdbg(VERBOSE,
|
||||
"Unit %u link now down (%s)\n",
|
||||
dd->ipath_unit,
|
||||
ipath_ibcstatus_str[ltstate]);
|
||||
else
|
||||
ipath_cdbg(VERBOSE, "Unit %u link is "
|
||||
"down (%s)\n", dd->ipath_unit,
|
||||
ipath_ibcstatus_str[ltstate]);
|
||||
}
|
||||
|
||||
dd->ipath_f_setextled(dd, lstate, ltstate);
|
||||
} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ACTIVE) {
|
||||
dd->ipath_flags |= IPATH_LINKACTIVE;
|
||||
dd->ipath_flags &=
|
||||
~(IPATH_LINKUNK | IPATH_LINKINIT | IPATH_LINKDOWN |
|
||||
IPATH_LINKARMED | IPATH_NOCABLE);
|
||||
*dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
|
||||
*dd->ipath_statusp |=
|
||||
IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
|
||||
dd->ipath_f_setextled(dd, lstate, ltstate);
|
||||
signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
|
||||
} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_INIT) {
|
||||
if (dd->ipath_flags & IPATH_LINKACTIVE)
|
||||
signal_ib_event(dd, IB_EVENT_PORT_ERR);
|
||||
/*
|
||||
* set INIT and DOWN. Down is checked by most of the other
|
||||
* code, but INIT is useful to know in a few places.
|
||||
*/
|
||||
dd->ipath_flags |= IPATH_LINKINIT | IPATH_LINKDOWN;
|
||||
dd->ipath_flags &=
|
||||
~(IPATH_LINKUNK | IPATH_LINKACTIVE | IPATH_LINKARMED
|
||||
| IPATH_NOCABLE);
|
||||
*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
|
||||
| IPATH_STATUS_IB_READY);
|
||||
dd->ipath_f_setextled(dd, lstate, ltstate);
|
||||
} else if ((val & IPATH_IBSTATE_MASK) == IPATH_IBSTATE_ARM) {
|
||||
if (dd->ipath_flags & IPATH_LINKACTIVE)
|
||||
signal_ib_event(dd, IB_EVENT_PORT_ERR);
|
||||
dd->ipath_flags |= IPATH_LINKARMED;
|
||||
dd->ipath_flags &=
|
||||
~(IPATH_LINKUNK | IPATH_LINKDOWN | IPATH_LINKINIT |
|
||||
IPATH_LINKACTIVE | IPATH_NOCABLE);
|
||||
*dd->ipath_statusp &= ~(IPATH_STATUS_IB_NOCABLE
|
||||
| IPATH_STATUS_IB_READY);
|
||||
dd->ipath_f_setextled(dd, lstate, ltstate);
|
||||
} else {
|
||||
if (!noprint)
|
||||
ipath_dbg("IBstatuschange unit %u: %s (%x)\n",
|
||||
dd->ipath_unit,
|
||||
ipath_ibcstatus_str[ltstate], ltstate);
|
||||
if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
|
||||
ipath_cdbg(VERBOSE, "Unit %u link state down "
|
||||
"(state 0x%x), from %s\n",
|
||||
dd->ipath_unit, lstate,
|
||||
ib_linkstate(dd, dd->ipath_lastibcstat));
|
||||
else
|
||||
ipath_cdbg(LINKVERB, "Unit %u link state changed "
|
||||
"to %s (0x%x) from down (%x)\n",
|
||||
dd->ipath_unit,
|
||||
ipath_ibcstatus_str[ltstate],
|
||||
ibstate, lastlstate);
|
||||
}
|
||||
|
||||
skip_ibchange:
|
||||
dd->ipath_lastibcstat = val;
|
||||
dd->ipath_lastibcstat = ibcs;
|
||||
done:
|
||||
return;
|
||||
}
|
||||
|
||||
static void handle_supp_msgs(struct ipath_devdata *dd,
|
||||
unsigned supp_msgs, char *msg, int msgsz)
|
||||
unsigned supp_msgs, char *msg, u32 msgsz)
|
||||
{
|
||||
/*
|
||||
* Print the message unless it's ibc status change only, which
|
||||
@ -461,12 +490,19 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
|
||||
*/
|
||||
if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
|
||||
int iserr;
|
||||
iserr = ipath_decode_err(msg, msgsz,
|
||||
ipath_err_t mask;
|
||||
iserr = ipath_decode_err(dd, msg, msgsz,
|
||||
dd->ipath_lasterror &
|
||||
~INFINIPATH_E_IBSTATUSCHANGED);
|
||||
if (dd->ipath_lasterror &
|
||||
~(INFINIPATH_E_RRCVEGRFULL |
|
||||
INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
|
||||
|
||||
mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
|
||||
INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
|
||||
|
||||
/* if we're in debug, then don't mask SDMADISABLED msgs */
|
||||
if (ipath_debug & __IPATH_DBG)
|
||||
mask &= ~INFINIPATH_E_SDMADISABLED;
|
||||
|
||||
if (dd->ipath_lasterror & ~mask)
|
||||
ipath_dev_err(dd, "Suppressed %u messages for "
|
||||
"fast-repeating errors (%s) (%llx)\n",
|
||||
supp_msgs, msg,
|
||||
@ -493,7 +529,7 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
|
||||
|
||||
static unsigned handle_frequent_errors(struct ipath_devdata *dd,
|
||||
ipath_err_t errs, char *msg,
|
||||
int msgsz, int *noprint)
|
||||
u32 msgsz, int *noprint)
|
||||
{
|
||||
unsigned long nc;
|
||||
static unsigned long nextmsg_time;
|
||||
@ -523,19 +559,125 @@ static unsigned handle_frequent_errors(struct ipath_devdata *dd,
|
||||
return supp_msgs;
|
||||
}
|
||||
|
||||
static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
{
|
||||
unsigned long flags;
|
||||
int expected;
|
||||
|
||||
if (ipath_debug & __IPATH_DBG) {
|
||||
char msg[128];
|
||||
ipath_decode_err(dd, msg, sizeof msg, errs &
|
||||
INFINIPATH_E_SDMAERRS);
|
||||
ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
|
||||
}
|
||||
if (ipath_debug & __IPATH_VERBDBG) {
|
||||
unsigned long tl, hd, status, lengen;
|
||||
tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
|
||||
hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
|
||||
status = ipath_read_kreg64(dd
|
||||
, dd->ipath_kregs->kr_senddmastatus);
|
||||
lengen = ipath_read_kreg64(dd,
|
||||
dd->ipath_kregs->kr_senddmalengen);
|
||||
ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
|
||||
"lengen 0x%lx\n", tl, hd, status, lengen);
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
|
||||
expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
if (!expected)
|
||||
ipath_cancel_sends(dd, 1);
|
||||
}
|
||||
|
||||
static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
|
||||
{
|
||||
unsigned long flags;
|
||||
int expected;
|
||||
|
||||
if ((istat & INFINIPATH_I_SDMAINT) &&
|
||||
!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
|
||||
ipath_sdma_intr(dd);
|
||||
|
||||
if (istat & INFINIPATH_I_SDMADISABLED) {
|
||||
expected = test_bit(IPATH_SDMA_ABORTING,
|
||||
&dd->ipath_sdma_status);
|
||||
ipath_dbg("%s SDmaDisabled intr\n",
|
||||
expected ? "expected" : "unexpected");
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
if (!expected)
|
||||
ipath_cancel_sends(dd, 1);
|
||||
if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
|
||||
tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
|
||||
}
|
||||
}
|
||||
|
||||
static int handle_hdrq_full(struct ipath_devdata *dd)
|
||||
{
|
||||
int chkerrpkts = 0;
|
||||
u32 hd, tl;
|
||||
u32 i;
|
||||
|
||||
ipath_stats.sps_hdrqfull++;
|
||||
for (i = 0; i < dd->ipath_cfgports; i++) {
|
||||
struct ipath_portdata *pd = dd->ipath_pd[i];
|
||||
|
||||
if (i == 0) {
|
||||
/*
|
||||
* For kernel receive queues, we just want to know
|
||||
* if there are packets in the queue that we can
|
||||
* process.
|
||||
*/
|
||||
if (pd->port_head != ipath_get_hdrqtail(pd))
|
||||
chkerrpkts |= 1 << i;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Skip if user context is not open */
|
||||
if (!pd || !pd->port_cnt)
|
||||
continue;
|
||||
|
||||
/* Don't report the same point multiple times. */
|
||||
if (dd->ipath_flags & IPATH_NODMA_RTAIL)
|
||||
tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
|
||||
else
|
||||
tl = ipath_get_rcvhdrtail(pd);
|
||||
if (tl == pd->port_lastrcvhdrqtail)
|
||||
continue;
|
||||
|
||||
hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
|
||||
if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
|
||||
pd->port_lastrcvhdrqtail = tl;
|
||||
pd->port_hdrqfull++;
|
||||
/* flush hdrqfull so that poll() sees it */
|
||||
wmb();
|
||||
wake_up_interruptible(&pd->port_wait);
|
||||
}
|
||||
}
|
||||
|
||||
return chkerrpkts;
|
||||
}
|
||||
|
||||
static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
{
|
||||
char msg[128];
|
||||
u64 ignore_this_time = 0;
|
||||
int i, iserr = 0;
|
||||
u64 iserr = 0;
|
||||
int chkerrpkts = 0, noprint = 0;
|
||||
unsigned supp_msgs;
|
||||
int log_idx;
|
||||
|
||||
supp_msgs = handle_frequent_errors(dd, errs, msg, sizeof msg, &noprint);
|
||||
/*
|
||||
* don't report errors that are masked, either at init
|
||||
* (not set in ipath_errormask), or temporarily (set in
|
||||
* ipath_maskederrs)
|
||||
*/
|
||||
errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
|
||||
|
||||
/* don't report errors that are masked */
|
||||
errs &= ~dd->ipath_maskederrs;
|
||||
supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
|
||||
&noprint);
|
||||
|
||||
/* do these first, they are most important */
|
||||
if (errs & INFINIPATH_E_HARDWARE) {
|
||||
@ -550,6 +692,9 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
}
|
||||
}
|
||||
|
||||
if (errs & INFINIPATH_E_SDMAERRS)
|
||||
handle_sdma_errors(dd, errs);
|
||||
|
||||
if (!noprint && (errs & ~dd->ipath_e_bitsextant))
|
||||
ipath_dev_err(dd, "error interrupt with unknown errors "
|
||||
"%llx set\n", (unsigned long long)
|
||||
@ -580,18 +725,19 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
* ones on this particular interrupt, which also isn't great
|
||||
*/
|
||||
dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
|
||||
|
||||
dd->ipath_errormask &= ~dd->ipath_maskederrs;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
|
||||
dd->ipath_errormask);
|
||||
s_iserr = ipath_decode_err(msg, sizeof msg,
|
||||
dd->ipath_maskederrs);
|
||||
dd->ipath_errormask);
|
||||
s_iserr = ipath_decode_err(dd, msg, sizeof msg,
|
||||
dd->ipath_maskederrs);
|
||||
|
||||
if (dd->ipath_maskederrs &
|
||||
~(INFINIPATH_E_RRCVEGRFULL |
|
||||
INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
|
||||
~(INFINIPATH_E_RRCVEGRFULL |
|
||||
INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
|
||||
ipath_dev_err(dd, "Temporarily disabling "
|
||||
"error(s) %llx reporting; too frequent (%s)\n",
|
||||
(unsigned long long)dd->ipath_maskederrs,
|
||||
(unsigned long long) dd->ipath_maskederrs,
|
||||
msg);
|
||||
else {
|
||||
/*
|
||||
@ -633,26 +779,43 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
INFINIPATH_E_IBSTATUSCHANGED);
|
||||
}
|
||||
|
||||
/* likely due to cancel, so suppress */
|
||||
if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
|
||||
dd->ipath_spectriggerhit++;
|
||||
ipath_dbg("%lu special trigger hits\n",
|
||||
dd->ipath_spectriggerhit);
|
||||
}
|
||||
|
||||
/* likely due to cancel; so suppress message unless verbose */
|
||||
if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
|
||||
dd->ipath_lastcancel > jiffies) {
|
||||
ipath_dbg("Suppressed armlaunch/spktlen after error send cancel\n");
|
||||
/* armlaunch takes precedence; it often causes both. */
|
||||
ipath_cdbg(VERBOSE,
|
||||
"Suppressed %s error (%llx) after sendbuf cancel\n",
|
||||
(errs & INFINIPATH_E_SPIOARMLAUNCH) ?
|
||||
"armlaunch" : "sendpktlen", (unsigned long long)errs);
|
||||
errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
|
||||
}
|
||||
|
||||
if (!errs)
|
||||
return 0;
|
||||
|
||||
if (!noprint)
|
||||
if (!noprint) {
|
||||
ipath_err_t mask;
|
||||
/*
|
||||
* the ones we mask off are handled specially below or above
|
||||
* The ones we mask off are handled specially below
|
||||
* or above. Also mask SDMADISABLED by default as it
|
||||
* is too chatty.
|
||||
*/
|
||||
ipath_decode_err(msg, sizeof msg,
|
||||
errs & ~(INFINIPATH_E_IBSTATUSCHANGED |
|
||||
INFINIPATH_E_RRCVEGRFULL |
|
||||
INFINIPATH_E_RRCVHDRFULL |
|
||||
INFINIPATH_E_HARDWARE));
|
||||
else
|
||||
mask = INFINIPATH_E_IBSTATUSCHANGED |
|
||||
INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
|
||||
INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
|
||||
|
||||
/* if we're in debug, then don't mask SDMADISABLED msgs */
|
||||
if (ipath_debug & __IPATH_DBG)
|
||||
mask &= ~INFINIPATH_E_SDMADISABLED;
|
||||
|
||||
ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
|
||||
} else
|
||||
/* so we don't need if (!noprint) at strlcat's below */
|
||||
*msg = 0;
|
||||
|
||||
@ -677,40 +840,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
* fast_stats, no more than every 5 seconds, user ports get printed
|
||||
* on close
|
||||
*/
|
||||
if (errs & INFINIPATH_E_RRCVHDRFULL) {
|
||||
u32 hd, tl;
|
||||
ipath_stats.sps_hdrqfull++;
|
||||
for (i = 0; i < dd->ipath_cfgports; i++) {
|
||||
struct ipath_portdata *pd = dd->ipath_pd[i];
|
||||
if (i == 0) {
|
||||
hd = pd->port_head;
|
||||
tl = (u32) le64_to_cpu(
|
||||
*dd->ipath_hdrqtailptr);
|
||||
} else if (pd && pd->port_cnt &&
|
||||
pd->port_rcvhdrtail_kvaddr) {
|
||||
/*
|
||||
* don't report same point multiple times,
|
||||
* except kernel
|
||||
*/
|
||||
tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
|
||||
if (tl == pd->port_lastrcvhdrqtail)
|
||||
continue;
|
||||
hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
|
||||
i);
|
||||
} else
|
||||
continue;
|
||||
if (hd == (tl + 1) ||
|
||||
(!hd && tl == dd->ipath_hdrqlast)) {
|
||||
if (i == 0)
|
||||
chkerrpkts = 1;
|
||||
pd->port_lastrcvhdrqtail = tl;
|
||||
pd->port_hdrqfull++;
|
||||
/* flush hdrqfull so that poll() sees it */
|
||||
wmb();
|
||||
wake_up_interruptible(&pd->port_wait);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (errs & INFINIPATH_E_RRCVHDRFULL)
|
||||
chkerrpkts |= handle_hdrq_full(dd);
|
||||
if (errs & INFINIPATH_E_RRCVEGRFULL) {
|
||||
struct ipath_portdata *pd = dd->ipath_pd[0];
|
||||
|
||||
@ -721,9 +852,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
* vs user)
|
||||
*/
|
||||
ipath_stats.sps_etidfull++;
|
||||
if (pd->port_head !=
|
||||
(u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
|
||||
chkerrpkts = 1;
|
||||
if (pd->port_head != ipath_get_hdrqtail(pd))
|
||||
chkerrpkts |= 1;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -741,16 +871,13 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
|
||||
| IPATH_LINKARMED | IPATH_LINKACTIVE);
|
||||
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
|
||||
if (!noprint) {
|
||||
u64 st = ipath_read_kreg64(
|
||||
dd, dd->ipath_kregs->kr_ibcstatus);
|
||||
|
||||
ipath_dbg("Lost link, link now down (%s)\n",
|
||||
ipath_ibcstatus_str[st & 0xf]);
|
||||
}
|
||||
ipath_dbg("Lost link, link now down (%s)\n",
|
||||
ipath_ibcstatus_str[ipath_read_kreg64(dd,
|
||||
dd->ipath_kregs->kr_ibcstatus) & 0xf]);
|
||||
}
|
||||
if (errs & INFINIPATH_E_IBSTATUSCHANGED)
|
||||
handle_e_ibstatuschanged(dd, errs, noprint);
|
||||
handle_e_ibstatuschanged(dd, errs);
|
||||
|
||||
if (errs & INFINIPATH_E_RESET) {
|
||||
if (!noprint)
|
||||
@ -765,9 +892,6 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
if (!noprint && *msg) {
|
||||
if (iserr)
|
||||
ipath_dev_err(dd, "%s error\n", msg);
|
||||
else
|
||||
dev_info(&dd->pcidev->dev, "%s packet problems\n",
|
||||
msg);
|
||||
}
|
||||
if (dd->ipath_state_wanted & dd->ipath_flags) {
|
||||
ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
|
||||
@ -779,7 +903,6 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
return chkerrpkts;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* try to cleanup as much as possible for anything that might have gone
|
||||
* wrong while in freeze mode, such as pio buffers being written by user
|
||||
@ -796,8 +919,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
|
||||
void ipath_clear_freeze(struct ipath_devdata *dd)
|
||||
{
|
||||
int i, im;
|
||||
__le64 val;
|
||||
unsigned long flags;
|
||||
u64 val;
|
||||
|
||||
/* disable error interrupts, to avoid confusion */
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
|
||||
@ -816,14 +938,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
|
||||
dd->ipath_control);
|
||||
|
||||
/* ensure pio avail updates continue */
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
ipath_force_pio_avail_update(dd);
|
||||
|
||||
/*
|
||||
* We just enabled pioavailupdate, so dma copy is almost certainly
|
||||
@ -831,10 +946,13 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
|
||||
*/
|
||||
for (i = 0; i < dd->ipath_pioavregs; i++) {
|
||||
/* deal with 6110 chip bug */
|
||||
im = i > 3 ? i ^ 1 : i;
|
||||
im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
|
||||
i ^ 1 : i;
|
||||
val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
|
||||
dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
|
||||
= le64_to_cpu(val);
|
||||
dd->ipath_pioavailregs_dma[i] = cpu_to_le64(val);
|
||||
dd->ipath_pioavailshadow[i] = val |
|
||||
(~dd->ipath_pioavailkernel[i] <<
|
||||
INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -950,7 +1068,7 @@ set:
|
||||
* process was waiting for a packet to arrive, and didn't want
|
||||
* to poll
|
||||
*/
|
||||
static void handle_urcv(struct ipath_devdata *dd, u32 istat)
|
||||
static void handle_urcv(struct ipath_devdata *dd, u64 istat)
|
||||
{
|
||||
u64 portr;
|
||||
int i;
|
||||
@ -966,12 +1084,13 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
|
||||
* and ipath_poll_next()...
|
||||
*/
|
||||
rmb();
|
||||
portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
|
||||
dd->ipath_i_rcvavail_mask)
|
||||
| ((istat >> INFINIPATH_I_RCVURG_SHIFT) &
|
||||
dd->ipath_i_rcvurg_mask);
|
||||
portr = ((istat >> dd->ipath_i_rcvavail_shift) &
|
||||
dd->ipath_i_rcvavail_mask) |
|
||||
((istat >> dd->ipath_i_rcvurg_shift) &
|
||||
dd->ipath_i_rcvurg_mask);
|
||||
for (i = 1; i < dd->ipath_cfgports; i++) {
|
||||
struct ipath_portdata *pd = dd->ipath_pd[i];
|
||||
|
||||
if (portr & (1 << i) && pd && pd->port_cnt) {
|
||||
if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
|
||||
&pd->port_flag)) {
|
||||
@ -988,7 +1107,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
|
||||
}
|
||||
if (rcvdint) {
|
||||
/* only want to take one interrupt, so turn off the rcv
|
||||
* interrupt for all the ports that we did the wakeup on
|
||||
* interrupt for all the ports that we set the rcv_waiting
|
||||
* (but never for kernel port)
|
||||
*/
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
|
||||
@ -999,12 +1118,11 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
irqreturn_t ipath_intr(int irq, void *data)
{
struct ipath_devdata *dd = data;
u32 istat, chk0rcv = 0;
u64 istat, chk0rcv = 0;
ipath_err_t estat = 0;
irqreturn_t ret;
static unsigned unexpected = 0;
static const u32 port0rbits = (1U<<INFINIPATH_I_RCVAVAIL_SHIFT) |
(1U<<INFINIPATH_I_RCVURG_SHIFT);
u64 kportrbits;

ipath_stats.sps_ints++;

@ -1053,17 +1171,17 @@ irqreturn_t ipath_intr(int irq, void *data)

if (unlikely(istat & ~dd->ipath_i_bitsextant))
ipath_dev_err(dd,
"interrupt with unknown interrupts %x set\n",
istat & (u32) ~ dd->ipath_i_bitsextant);
else
ipath_cdbg(VERBOSE, "intr stat=0x%x\n", istat);
"interrupt with unknown interrupts %Lx set\n",
istat & ~dd->ipath_i_bitsextant);
else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);

if (unlikely(istat & INFINIPATH_I_ERROR)) {
if (istat & INFINIPATH_I_ERROR) {
ipath_stats.sps_errints++;
estat = ipath_read_kreg64(dd,
dd->ipath_kregs->kr_errorstatus);
if (!estat)
dev_info(&dd->pcidev->dev, "error interrupt (%x), "
dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
"but no error bits set!\n", istat);
else if (estat == -1LL)
/*
@ -1073,9 +1191,7 @@ irqreturn_t ipath_intr(int irq, void *data)
ipath_dev_err(dd, "Read of error status failed "
"(all bits set); ignoring\n");
else
if (handle_errors(dd, estat))
/* force calling ipath_kreceive() */
chk0rcv = 1;
chk0rcv |= handle_errors(dd, estat);
}

if (istat & INFINIPATH_I_GPIO) {
@ -1093,8 +1209,7 @@ irqreturn_t ipath_intr(int irq, void *data)

gpiostatus = ipath_read_kreg32(
dd, dd->ipath_kregs->kr_gpio_status);
/* First the error-counter case.
*/
/* First the error-counter case. */
if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
(dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
/* want to clear the bits we see asserted. */
@ -1156,7 +1271,6 @@ irqreturn_t ipath_intr(int irq, void *data)
(u64) to_clear);
}
}
chk0rcv |= istat & port0rbits;

/*
* Clear the interrupt bits we found set, unless they are receive
@ -1169,22 +1283,25 @@ irqreturn_t ipath_intr(int irq, void *data)
ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);

/*
* handle port0 receive before checking for pio buffers available,
* since receives can overflow; piobuf waiters can afford a few
* extra cycles, since they were waiting anyway, and user's waiting
* for receive are at the bottom.
* Handle kernel receive queues before checking for pio buffers
* available since receives can overflow; piobuf waiters can afford
* a few extra cycles, since they were waiting anyway, and user's
* waiting for receive are at the bottom.
*/
if (chk0rcv) {
kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
(1ULL << dd->ipath_i_rcvurg_shift);
if (chk0rcv || (istat & kportrbits)) {
istat &= ~kportrbits;
ipath_kreceive(dd->ipath_pd[0]);
istat &= ~port0rbits;
}

if (istat & ((dd->ipath_i_rcvavail_mask <<
INFINIPATH_I_RCVAVAIL_SHIFT)
| (dd->ipath_i_rcvurg_mask <<
INFINIPATH_I_RCVURG_SHIFT)))
if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
(dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
handle_urcv(dd, istat);

if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
handle_sdma_intr(dd, istat);

if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
unsigned long flags;

@ -1195,7 +1312,10 @@ irqreturn_t ipath_intr(int irq, void *data)
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

handle_layer_pioavail(dd);
if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
handle_layer_pioavail(dd);
else
ipath_dbg("unexpected BUFAVAIL intr\n");
}

ret = IRQ_HANDLED;

@ -1,7 +1,7 @@
|
||||
#ifndef _IPATH_KERNEL_H
|
||||
#define _IPATH_KERNEL_H
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -42,6 +42,8 @@
|
||||
#include <linux/pci.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <asm/io.h>
|
||||
#include <rdma/ib_verbs.h>
|
||||
|
||||
@ -175,9 +177,13 @@ struct ipath_portdata {
|
||||
u16 poll_type;
|
||||
/* port rcvhdrq head offset */
|
||||
u32 port_head;
|
||||
/* receive packet sequence counter */
|
||||
u32 port_seq_cnt;
|
||||
};
|
||||
|
||||
struct sk_buff;
|
||||
struct ipath_sge_state;
|
||||
struct ipath_verbs_txreq;
|
||||
|
||||
/*
|
||||
* control information for layered drivers
|
||||
@ -191,6 +197,40 @@ struct ipath_skbinfo {
|
||||
dma_addr_t phys;
|
||||
};
|
||||
|
||||
struct ipath_sdma_txreq {
|
||||
int flags;
|
||||
int sg_count;
|
||||
union {
|
||||
struct scatterlist *sg;
|
||||
void *map_addr;
|
||||
};
|
||||
void (*callback)(void *, int);
|
||||
void *callback_cookie;
|
||||
int callback_status;
|
||||
u16 start_idx; /* sdma private */
|
||||
u16 next_descq_idx; /* sdma private */
|
||||
struct list_head list; /* sdma private */
|
||||
};
|
||||
|
||||
struct ipath_sdma_desc {
|
||||
__le64 qw[2];
|
||||
};
|
||||
|
||||
#define IPATH_SDMA_TXREQ_F_USELARGEBUF 0x1
|
||||
#define IPATH_SDMA_TXREQ_F_HEADTOHOST 0x2
|
||||
#define IPATH_SDMA_TXREQ_F_INTREQ 0x4
|
||||
#define IPATH_SDMA_TXREQ_F_FREEBUF 0x8
|
||||
#define IPATH_SDMA_TXREQ_F_FREEDESC 0x10
|
||||
#define IPATH_SDMA_TXREQ_F_VL15 0x20
|
||||
|
||||
#define IPATH_SDMA_TXREQ_S_OK 0
|
||||
#define IPATH_SDMA_TXREQ_S_SENDERROR 1
|
||||
#define IPATH_SDMA_TXREQ_S_ABORTED 2
|
||||
#define IPATH_SDMA_TXREQ_S_SHUTDOWN 3
|
||||
|
||||
/* max dwords in small buffer packet */
|
||||
#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
|
||||
|
||||
/*
|
||||
* Possible IB config parameters for ipath_f_get/set_ib_cfg()
|
||||
*/
|
||||
@ -221,11 +261,6 @@ struct ipath_devdata {
|
||||
unsigned long ipath_physaddr;
|
||||
/* base of memory alloced for ipath_kregbase, for free */
|
||||
u64 *ipath_kregalloc;
|
||||
/*
|
||||
* virtual address where port0 rcvhdrqtail updated for this unit.
|
||||
* only written to by the chip, not the driver.
|
||||
*/
|
||||
volatile __le64 *ipath_hdrqtailptr;
|
||||
/* ipath_cfgports pointers */
|
||||
struct ipath_portdata **ipath_pd;
|
||||
/* sk_buffs used by port 0 eager receive queue */
|
||||
@ -283,6 +318,7 @@ struct ipath_devdata {
|
||||
/* per chip actions needed for IB Link up/down changes */
|
||||
int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
|
||||
|
||||
unsigned ipath_lastegr_idx;
|
||||
struct ipath_ibdev *verbs_dev;
|
||||
struct timer_list verbs_timer;
|
||||
/* total dwords sent (summed from counter) */
|
||||
@ -309,6 +345,7 @@ struct ipath_devdata {
|
||||
ipath_err_t ipath_lasthwerror;
|
||||
/* errors masked because they occur too fast */
|
||||
ipath_err_t ipath_maskederrs;
|
||||
u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
|
||||
/* time in jiffies at which to re-enable maskederrs */
|
||||
unsigned long ipath_unmasktime;
|
||||
/* count of egrfull errors, combined for all ports */
|
||||
@ -347,6 +384,7 @@ struct ipath_devdata {
|
||||
u32 ipath_lastrpkts;
|
||||
/* pio bufs allocated per port */
|
||||
u32 ipath_pbufsport;
|
||||
u32 ipath_pioupd_thresh; /* update threshold, some chips */
|
||||
/*
|
||||
* number of ports configured as max; zero is set to number chip
|
||||
* supports, less gives more pio bufs/port, etc.
|
||||
@ -365,6 +403,7 @@ struct ipath_devdata {
|
||||
* get to multiple devices
|
||||
*/
|
||||
u32 ipath_lastpioindex;
|
||||
u32 ipath_lastpioindexl;
|
||||
/* max length of freezemsg */
|
||||
u32 ipath_freezelen;
|
||||
/*
|
||||
@ -381,6 +420,15 @@ struct ipath_devdata {
|
||||
u32 ipath_pcibar0;
|
||||
/* so we can rewrite it after a chip reset */
|
||||
u32 ipath_pcibar1;
|
||||
u32 ipath_x1_fix_tries;
|
||||
u32 ipath_autoneg_tries;
|
||||
u32 serdes_first_init_done;
|
||||
|
||||
struct ipath_relock {
|
||||
atomic_t ipath_relock_timer_active;
|
||||
struct timer_list ipath_relock_timer;
|
||||
unsigned int ipath_relock_interval; /* in jiffies */
|
||||
} ipath_relock_singleton;
|
||||
|
||||
/* interrupt number */
|
||||
int ipath_irq;
|
||||
@ -403,7 +451,7 @@ struct ipath_devdata {
|
||||
u64 __iomem *ipath_egrtidbase;
|
||||
/* lock to workaround chip bug 9437 and others */
|
||||
spinlock_t ipath_kernel_tid_lock;
|
||||
spinlock_t ipath_tid_lock;
|
||||
spinlock_t ipath_user_tid_lock;
|
||||
spinlock_t ipath_sendctrl_lock;
|
||||
|
||||
/*
|
||||
@ -422,11 +470,48 @@ struct ipath_devdata {
|
||||
struct class_device *diag_class_dev;
|
||||
/* timer used to prevent stats overflow, error throttling, etc. */
|
||||
struct timer_list ipath_stats_timer;
|
||||
/* timer to verify interrupts work, and fallback if possible */
|
||||
struct timer_list ipath_intrchk_timer;
|
||||
void *ipath_dummy_hdrq; /* used after port close */
|
||||
dma_addr_t ipath_dummy_hdrq_phys;
|
||||
|
||||
/* SendDMA related entries */
|
||||
spinlock_t ipath_sdma_lock;
|
||||
u64 ipath_sdma_status;
|
||||
unsigned long ipath_sdma_abort_jiffies;
|
||||
unsigned long ipath_sdma_abort_intr_timeout;
|
||||
unsigned long ipath_sdma_buf_jiffies;
|
||||
struct ipath_sdma_desc *ipath_sdma_descq;
|
||||
u64 ipath_sdma_descq_added;
|
||||
u64 ipath_sdma_descq_removed;
|
||||
int ipath_sdma_desc_nreserved;
|
||||
u16 ipath_sdma_descq_cnt;
|
||||
u16 ipath_sdma_descq_tail;
|
||||
u16 ipath_sdma_descq_head;
|
||||
u16 ipath_sdma_next_intr;
|
||||
u16 ipath_sdma_reset_wait;
|
||||
u8 ipath_sdma_generation;
|
||||
struct tasklet_struct ipath_sdma_abort_task;
|
||||
struct tasklet_struct ipath_sdma_notify_task;
|
||||
struct list_head ipath_sdma_activelist;
|
||||
struct list_head ipath_sdma_notifylist;
|
||||
atomic_t ipath_sdma_vl15_count;
|
||||
struct timer_list ipath_sdma_vl15_timer;
|
||||
|
||||
dma_addr_t ipath_sdma_descq_phys;
|
||||
volatile __le64 *ipath_sdma_head_dma;
|
||||
dma_addr_t ipath_sdma_head_phys;
|
||||
|
||||
unsigned long ipath_ureg_align; /* user register alignment */
|
||||
|
||||
struct delayed_work ipath_autoneg_work;
|
||||
wait_queue_head_t ipath_autoneg_wait;
|
||||
|
||||
/* HoL blocking / user app forward-progress state */
|
||||
unsigned ipath_hol_state;
|
||||
unsigned ipath_hol_next;
|
||||
struct timer_list ipath_hol_timer;
|
||||
|
||||
/*
|
||||
* Shadow copies of registers; size indicates read access size.
|
||||
* Most of them are readonly, but some are write-only register,
|
||||
@ -447,6 +532,8 @@ struct ipath_devdata {
|
||||
* init time.
|
||||
*/
|
||||
unsigned long ipath_pioavailshadow[8];
|
||||
/* bitmap of send buffers available for the kernel to use with PIO. */
|
||||
unsigned long ipath_pioavailkernel[8];
|
||||
/* shadow of kr_gpio_out, for rmw ops */
|
||||
u64 ipath_gpio_out;
|
||||
/* shadow the gpio mask register */
|
||||
@ -472,6 +559,8 @@ struct ipath_devdata {
|
||||
u64 ipath_intconfig;
|
||||
/* kr_sendpiobufbase value */
|
||||
u64 ipath_piobufbase;
|
||||
/* kr_ibcddrctrl shadow */
|
||||
u64 ipath_ibcddrctrl;
|
||||
|
||||
/* these are the "32 bit" regs */
|
||||
|
||||
@ -488,7 +577,10 @@ struct ipath_devdata {
|
||||
unsigned long ipath_rcvctrl;
|
||||
/* shadow kr_sendctrl */
|
||||
unsigned long ipath_sendctrl;
|
||||
unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */
|
||||
/* to not count armlaunch after cancel */
|
||||
unsigned long ipath_lastcancel;
|
||||
/* count cases where special trigger was needed (double write) */
|
||||
unsigned long ipath_spectriggerhit;
|
||||
|
||||
/* value we put in kr_rcvhdrcnt */
|
||||
u32 ipath_rcvhdrcnt;
|
||||
@ -510,6 +602,7 @@ struct ipath_devdata {
|
||||
u32 ipath_piobcnt4k;
|
||||
/* size in bytes of "4KB" PIO buffers */
|
||||
u32 ipath_piosize4k;
|
||||
u32 ipath_pioreserved; /* reserved special-inkernel; */
|
||||
/* kr_rcvegrbase value */
|
||||
u32 ipath_rcvegrbase;
|
||||
/* kr_rcvegrcnt value */
|
||||
@ -546,10 +639,10 @@ struct ipath_devdata {
|
||||
u32 ipath_init_ibmaxlen;
|
||||
/* size of each rcvegrbuffer */
|
||||
u32 ipath_rcvegrbufsize;
|
||||
/* width (2,4,8,16,32) from HT config reg */
|
||||
u32 ipath_htwidth;
|
||||
/* HT speed (200,400,800,1000) from HT config */
|
||||
u32 ipath_htspeed;
|
||||
/* localbus width (1, 2,4,8,16,32) from config space */
|
||||
u32 ipath_lbus_width;
|
||||
/* localbus speed (HT: 200,400,800,1000; PCIe 2500) */
|
||||
u32 ipath_lbus_speed;
|
||||
/*
|
||||
* number of sequential ibcstatus change for polling active/quiet
|
||||
* (i.e., link not coming up).
|
||||
@ -573,21 +666,14 @@ struct ipath_devdata {
|
||||
*/
|
||||
u8 ipath_serial[16];
|
||||
/* human readable board version */
|
||||
u8 ipath_boardversion[80];
|
||||
u8 ipath_boardversion[96];
|
||||
u8 ipath_lbus_info[32]; /* human readable localbus info */
|
||||
/* chip major rev, from ipath_revision */
|
||||
u8 ipath_majrev;
|
||||
/* chip minor rev, from ipath_revision */
|
||||
u8 ipath_minrev;
|
||||
/* board rev, from ipath_revision */
|
||||
u8 ipath_boardrev;
|
||||
|
||||
u8 ipath_r_portenable_shift;
|
||||
u8 ipath_r_intravail_shift;
|
||||
u8 ipath_r_tailupd_shift;
|
||||
u8 ipath_r_portcfg_shift;
|
||||
|
||||
/* unit # of this chip, if present */
|
||||
int ipath_unit;
|
||||
/* saved for restore after reset */
|
||||
u8 ipath_pci_cacheline;
|
||||
/* LID mask control */
|
||||
@ -603,6 +689,14 @@ struct ipath_devdata {
|
||||
/* Rx Polarity inversion (compensate for ~tx on partner) */
|
||||
u8 ipath_rx_pol_inv;
|
||||
|
||||
u8 ipath_r_portenable_shift;
|
||||
u8 ipath_r_intravail_shift;
|
||||
u8 ipath_r_tailupd_shift;
|
||||
u8 ipath_r_portcfg_shift;
|
||||
|
||||
/* unit # of this chip, if present */
|
||||
int ipath_unit;
|
||||
|
||||
/* local link integrity counter */
|
||||
u32 ipath_lli_counter;
|
||||
/* local link integrity errors */
|
||||
@ -617,9 +711,6 @@ struct ipath_devdata {
|
||||
u32 ipath_overrun_thresh_errs;
|
||||
u32 ipath_lli_errs;
|
||||
|
||||
/* status check work */
|
||||
struct delayed_work status_work;
|
||||
|
||||
/*
|
||||
* Not all devices managed by a driver instance are the same
|
||||
* type, so these fields must be per-device.
|
||||
@ -632,8 +723,8 @@ struct ipath_devdata {
|
||||
* Below should be computable from number of ports,
|
||||
* since they are never modified.
|
||||
*/
|
||||
u32 ipath_i_rcvavail_mask;
|
||||
u32 ipath_i_rcvurg_mask;
|
||||
u64 ipath_i_rcvavail_mask;
|
||||
u64 ipath_i_rcvurg_mask;
|
||||
u16 ipath_i_rcvurg_shift;
|
||||
u16 ipath_i_rcvavail_shift;
|
||||
|
||||
@ -641,8 +732,9 @@ struct ipath_devdata {
|
||||
* Register bits for selecting i2c direction and values, used for
|
||||
* I2C serial flash.
|
||||
*/
|
||||
u16 ipath_gpio_sda_num;
|
||||
u16 ipath_gpio_scl_num;
|
||||
u8 ipath_gpio_sda_num;
|
||||
u8 ipath_gpio_scl_num;
|
||||
u8 ipath_i2c_chain_type;
|
||||
u64 ipath_gpio_sda;
|
||||
u64 ipath_gpio_scl;
|
||||
|
||||
@ -703,13 +795,51 @@ struct ipath_devdata {
|
||||
/* interrupt mitigation reload register info */
|
||||
u16 ipath_jint_idle_ticks; /* idle clock ticks */
|
||||
u16 ipath_jint_max_packets; /* max packets across all ports */
|
||||
|
||||
/*
|
||||
* lock for access to SerDes, and flags to sequence preset
|
||||
* versus steady-state. 7220-only at the moment.
|
||||
*/
|
||||
spinlock_t ipath_sdepb_lock;
|
||||
u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
|
||||
};
|
||||
|
||||
/* ipath_hol_state values (stopping/starting user proc, send flushing) */
|
||||
#define IPATH_HOL_UP 0
|
||||
#define IPATH_HOL_DOWN 1
|
||||
/* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
|
||||
#define IPATH_HOL_DOWNSTOP 0
|
||||
#define IPATH_HOL_DOWNCONT 1
|
||||
|
||||
/* bit positions for sdma_status */
|
||||
#define IPATH_SDMA_ABORTING 0
|
||||
#define IPATH_SDMA_DISARMED 1
|
||||
#define IPATH_SDMA_DISABLED 2
|
||||
#define IPATH_SDMA_LAYERBUF 3
|
||||
#define IPATH_SDMA_RUNNING 62
|
||||
#define IPATH_SDMA_SHUTDOWN 63
|
||||
|
||||
/* bit combinations that correspond to abort states */
|
||||
#define IPATH_SDMA_ABORT_NONE 0
|
||||
#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
|
||||
#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
|
||||
(1UL << IPATH_SDMA_DISARMED))
|
||||
#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
|
||||
(1UL << IPATH_SDMA_DISABLED))
|
||||
#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
|
||||
(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
|
||||
#define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
|
||||
(1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
|
||||
|
||||
#define IPATH_SDMA_BUF_NONE 0
|
||||
#define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
|
||||
|
||||
/* Private data for file operations */
|
||||
struct ipath_filedata {
|
||||
struct ipath_portdata *pd;
|
||||
unsigned subport;
|
||||
unsigned tidcursor;
|
||||
struct ipath_user_sdma_queue *pq;
|
||||
};
|
||||
extern struct list_head ipath_dev_list;
|
||||
extern spinlock_t ipath_devs_lock;
|
||||
@ -718,7 +848,7 @@ extern struct ipath_devdata *ipath_lookup(int unit);
|
||||
int ipath_init_chip(struct ipath_devdata *, int);
|
||||
int ipath_enable_wc(struct ipath_devdata *dd);
|
||||
void ipath_disable_wc(struct ipath_devdata *dd);
|
||||
int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
|
||||
int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
|
||||
void ipath_shutdown_device(struct ipath_devdata *);
|
||||
void ipath_clear_freeze(struct ipath_devdata *);
|
||||
|
||||
@ -741,7 +871,8 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
|
||||
extern int ipath_diag_inuse;
|
||||
|
||||
irqreturn_t ipath_intr(int irq, void *devid);
|
||||
int ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
|
||||
int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
|
||||
ipath_err_t err);
|
||||
#if __IPATH_INFO || __IPATH_DBG
|
||||
extern const char *ipath_ibcstatus_str[];
|
||||
#endif
|
||||
@ -774,6 +905,13 @@ int ipath_set_lid(struct ipath_devdata *, u32, u8);
|
||||
int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
|
||||
void ipath_enable_armlaunch(struct ipath_devdata *);
|
||||
void ipath_disable_armlaunch(struct ipath_devdata *);
|
||||
void ipath_hol_down(struct ipath_devdata *);
|
||||
void ipath_hol_up(struct ipath_devdata *);
|
||||
void ipath_hol_event(unsigned long);
|
||||
void ipath_toggle_rclkrls(struct ipath_devdata *);
|
||||
void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
|
||||
void ipath_set_relock_poll(struct ipath_devdata *, int);
|
||||
void ipath_shutdown_relock_poll(struct ipath_devdata *);
|
||||
|
||||
/* for use in system calls, where we want to know device type, etc. */
|
||||
#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
|
||||
@ -781,11 +919,15 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
|
||||
((struct ipath_filedata *)(fp)->private_data)->subport
|
||||
#define tidcursor_fp(fp) \
|
||||
((struct ipath_filedata *)(fp)->private_data)->tidcursor
|
||||
#define user_sdma_queue_fp(fp) \
|
||||
((struct ipath_filedata *)(fp)->private_data)->pq
|
||||
|
||||
/*
|
||||
* values for ipath_flags
|
||||
*/
|
||||
/* The chip is up and initted */
|
||||
/* chip can report link latency (IB 1.2) */
|
||||
#define IPATH_HAS_LINK_LATENCY 0x1
|
||||
/* The chip is up and initted */
|
||||
#define IPATH_INITTED 0x2
|
||||
/* set if any user code has set kr_rcvhdrsize */
|
||||
#define IPATH_RCVHDRSZ_SET 0x4
|
||||
@ -809,6 +951,8 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
|
||||
#define IPATH_LINKUNK 0x400
|
||||
/* Write combining flush needed for PIO */
|
||||
#define IPATH_PIO_FLUSH_WC 0x1000
|
||||
/* DMA Receive tail pointer */
|
||||
#define IPATH_NODMA_RTAIL 0x2000
|
||||
/* no IB cable, or no device on IB cable */
|
||||
#define IPATH_NOCABLE 0x4000
|
||||
/* Supports port zero per packet receive interrupts via
|
||||
@ -819,16 +963,26 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
|
||||
/* packet/word counters are 32 bit, else those 4 counters
|
||||
* are 64bit */
|
||||
#define IPATH_32BITCOUNTERS 0x20000
|
||||
/* can miss port0 rx interrupts */
|
||||
/* Interrupt register is 64 bits */
|
||||
#define IPATH_INTREG_64 0x40000
|
||||
/* can miss port0 rx interrupts */
|
||||
#define IPATH_DISABLED 0x80000 /* administratively disabled */
|
||||
/* Use GPIO interrupts for new counters */
|
||||
#define IPATH_GPIO_ERRINTRS 0x100000
|
||||
#define IPATH_SWAP_PIOBUFS 0x200000
|
||||
/* Supports Send DMA */
|
||||
#define IPATH_HAS_SEND_DMA 0x400000
|
||||
/* Supports Send Count (not just word count) in PBC */
|
||||
#define IPATH_HAS_PBC_CNT 0x800000
|
||||
/* Suppress heartbeat, even if turning off loopback */
|
||||
#define IPATH_NO_HRTBT 0x1000000
|
||||
#define IPATH_HAS_THRESH_UPDATE 0x4000000
|
||||
#define IPATH_HAS_MULT_IB_SPEED 0x8000000
|
||||
#define IPATH_IB_AUTONEG_INPROG 0x10000000
|
||||
#define IPATH_IB_AUTONEG_FAILED 0x20000000
|
||||
/* Linkdown-disable intentionally, Do not attempt to bring up */
|
||||
#define IPATH_IB_LINK_DISABLED 0x40000000
|
||||
#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
|
||||
|
||||
/* Bits in GPIO for the added interrupts */
|
||||
#define IPATH_GPIO_PORT0_BIT 2
|
||||
@ -847,13 +1001,18 @@ void ipath_disable_armlaunch(struct ipath_devdata *);
|
||||
|
||||
/* free up any allocated data at closes */
|
||||
void ipath_free_data(struct ipath_portdata *dd);
|
||||
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
|
||||
u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
|
||||
void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
|
||||
unsigned len, int avail);
|
||||
void ipath_init_iba7220_funcs(struct ipath_devdata *);
|
||||
void ipath_init_iba6120_funcs(struct ipath_devdata *);
|
||||
void ipath_init_iba6110_funcs(struct ipath_devdata *);
|
||||
void ipath_get_eeprom_info(struct ipath_devdata *);
|
||||
int ipath_update_eeprom_log(struct ipath_devdata *dd);
|
||||
void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
|
||||
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
|
||||
void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
|
||||
void ipath_force_pio_avail_update(struct ipath_devdata *);
|
||||
void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
|
||||
|
||||
/*
|
||||
@ -865,6 +1024,34 @@ void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
|
||||
#define IPATH_LED_LOG 2 /* Logical (link) YELLOW LED */
|
||||
void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
|
||||
|
||||
/* send dma routines */
|
||||
int setup_sdma(struct ipath_devdata *);
|
||||
void teardown_sdma(struct ipath_devdata *);
|
||||
void ipath_restart_sdma(struct ipath_devdata *);
|
||||
void ipath_sdma_intr(struct ipath_devdata *);
|
||||
int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
|
||||
u32, struct ipath_verbs_txreq *);
|
||||
/* ipath_sdma_lock should be locked before calling this. */
|
||||
int ipath_sdma_make_progress(struct ipath_devdata *dd);
|
||||
|
||||
/* must be called under ipath_sdma_lock */
|
||||
static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
|
||||
{
|
||||
return dd->ipath_sdma_descq_cnt -
|
||||
(dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
|
||||
1 - dd->ipath_sdma_desc_nreserved;
|
||||
}
|
||||
|
||||
static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
|
||||
{
|
||||
dd->ipath_sdma_desc_nreserved += cnt;
|
||||
}
|
||||
|
||||
static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
|
||||
{
|
||||
dd->ipath_sdma_desc_nreserved -= cnt;
|
||||
}
|
||||
|
||||
/*
|
||||
* number of words used for protocol header if not set by ipath_userinit();
|
||||
*/
|
||||
@ -875,6 +1062,8 @@ void ipath_release_user_pages(struct page **, size_t);
|
||||
void ipath_release_user_pages_on_close(struct page **, size_t);
|
||||
int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
|
||||
int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
|
||||
int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
|
||||
int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
|
||||
|
||||
/* these are used for the registers that vary with port */
|
||||
void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
|
||||
@ -891,8 +1080,7 @@ void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
|
||||
|
||||
/*
|
||||
* At the moment, none of the s-registers are writable, so no
|
||||
* ipath_write_sreg(), and none of the c-registers are writable, so no
|
||||
* ipath_write_creg().
|
||||
* ipath_write_sreg().
|
||||
*/
|
||||
|
||||
/**
|
||||
@ -1001,6 +1189,27 @@ static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
|
||||
pd->port_rcvhdrtail_kvaddr));
|
||||
}
|
||||
|
||||
static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
|
||||
{
|
||||
const struct ipath_devdata *dd = pd->port_dd;
|
||||
u32 hdrqtail;
|
||||
|
||||
if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
|
||||
__le32 *rhf_addr;
|
||||
u32 seq;
|
||||
|
||||
rhf_addr = (__le32 *) pd->port_rcvhdrq +
|
||||
pd->port_head + dd->ipath_rhf_offset;
|
||||
seq = ipath_hdrget_seq(rhf_addr);
|
||||
hdrqtail = pd->port_head;
|
||||
if (seq == pd->port_seq_cnt)
|
||||
hdrqtail++;
|
||||
} else
|
||||
hdrqtail = ipath_get_rcvhdrtail(pd);
|
||||
|
||||
return hdrqtail;
|
||||
}
|
||||
|
||||
static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
|
||||
{
|
||||
return (dd->ipath_flags & IPATH_INTREG_64) ?
|
||||
@ -1028,6 +1237,21 @@ static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
|
||||
dd->ibcs_lts_mask;
|
||||
}
|
||||
|
||||
/*
|
||||
* from contents of IBCStatus (or a saved copy), return logical link state
|
||||
* combination of link state and linktraining state (down, active, init,
|
||||
* arm, etc.
|
||||
*/
|
||||
static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
|
||||
{
|
||||
u32 ibs;
|
||||
ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
|
||||
dd->ibcs_lts_mask;
|
||||
ibs |= (u32)(ibcs &
|
||||
(INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
|
||||
return ibs;
|
||||
}
|
||||
|
||||
/*
|
||||
* sysfs interface.
|
||||
*/
|
||||
@ -1053,6 +1277,7 @@ int ipathfs_remove_device(struct ipath_devdata *);
|
||||
dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
|
||||
size_t, int);
|
||||
dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
|
||||
const char *ipath_get_unit_name(int unit);
|
||||
|
||||
/*
|
||||
* Flush write combining store buffers (if present) and perform a write
|
||||
@ -1065,11 +1290,8 @@ dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
|
||||
#endif
|
||||
|
||||
extern unsigned ipath_debug; /* debugging bit mask */
|
||||
|
||||
#define IPATH_MAX_PARITY_ATTEMPTS 10000 /* max times to try recovery */
|
||||
|
||||
const char *ipath_get_unit_name(int unit);
|
||||
|
||||
extern unsigned ipath_linkrecovery;
|
||||
extern unsigned ipath_mtu4096;
|
||||
extern struct mutex ipath_mutex;
|
||||
|
||||
#define IPATH_DRV_NAME "ib_ipath"
|
||||
@ -1096,7 +1318,7 @@ extern struct mutex ipath_mutex;
|
||||
|
||||
# define __IPATH_DBG_WHICH(which,fmt,...) \
|
||||
do { \
|
||||
if(unlikely(ipath_debug&(which))) \
|
||||
if (unlikely(ipath_debug & (which))) \
|
||||
printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
|
||||
__func__,##__VA_ARGS__); \
|
||||
} while(0)
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -146,6 +146,15 @@ static int recv_subn_get_guidinfo(struct ib_smp *smp,
|
||||
return reply(smp);
|
||||
}
|
||||
|
||||
static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
|
||||
{
|
||||
(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
|
||||
}
|
||||
|
||||
static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
|
||||
{
|
||||
(void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
|
||||
}
|
||||
|
||||
static int get_overrunthreshold(struct ipath_devdata *dd)
|
||||
{
|
||||
@ -226,6 +235,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
|
||||
struct ib_device *ibdev, u8 port)
|
||||
{
|
||||
struct ipath_ibdev *dev;
|
||||
struct ipath_devdata *dd;
|
||||
struct ib_port_info *pip = (struct ib_port_info *)smp->data;
|
||||
u16 lid;
|
||||
u8 ibcstat;
|
||||
@ -239,6 +249,7 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
|
||||
}
|
||||
|
||||
dev = to_idev(ibdev);
|
||||
dd = dev->dd;
|
||||
|
||||
/* Clear all fields. Only set the non-zero fields. */
|
||||
memset(smp->data, 0, sizeof(smp->data));
|
||||
@ -248,25 +259,28 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
|
||||
dev->mkeyprot == 0)
|
||||
pip->mkey = dev->mkey;
|
||||
pip->gid_prefix = dev->gid_prefix;
|
||||
lid = dev->dd->ipath_lid;
|
||||
lid = dd->ipath_lid;
|
||||
pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
|
||||
pip->sm_lid = cpu_to_be16(dev->sm_lid);
|
||||
pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
|
||||
/* pip->diag_code; */
|
||||
pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
|
||||
pip->local_port_num = port;
|
||||
pip->link_width_enabled = dev->link_width_enabled;
|
||||
pip->link_width_supported = 3; /* 1x or 4x */
|
||||
pip->link_width_active = 2; /* 4x */
|
||||
pip->linkspeed_portstate = 0x10; /* 2.5Gbps */
|
||||
ibcstat = dev->dd->ipath_lastibcstat;
|
||||
pip->linkspeed_portstate |= ((ibcstat >> 4) & 0x3) + 1;
|
||||
pip->link_width_enabled = dd->ipath_link_width_enabled;
|
||||
pip->link_width_supported = dd->ipath_link_width_supported;
|
||||
pip->link_width_active = dd->ipath_link_width_active;
|
||||
pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
|
||||
ibcstat = dd->ipath_lastibcstat;
|
||||
/* map LinkState to IB portinfo values. */
|
||||
pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
|
||||
|
||||
pip->portphysstate_linkdown =
|
||||
(ipath_cvt_physportstate[ibcstat & 0xf] << 4) |
|
||||
(get_linkdowndefaultstate(dev->dd) ? 1 : 2);
|
||||
pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dev->dd->ipath_lmc;
|
||||
pip->linkspeedactive_enabled = 0x11; /* 2.5Gbps, 2.5Gbps */
|
||||
switch (dev->dd->ipath_ibmtu) {
|
||||
(ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
|
||||
(get_linkdowndefaultstate(dd) ? 1 : 2);
|
||||
pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
|
||||
pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
|
||||
dd->ipath_link_speed_enabled;
|
||||
switch (dd->ipath_ibmtu) {
|
||||
case 4096:
|
||||
mtu = IB_MTU_4096;
|
||||
break;
|
||||
@ -292,19 +306,15 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
|
||||
/* pip->vl_arb_high_cap; // only one VL */
|
||||
/* pip->vl_arb_low_cap; // only one VL */
|
||||
/* InitTypeReply = 0 */
|
||||
/*
|
||||
* Note: the chips support a maximum MTU of 4096, but the driver
|
||||
* hasn't implemented this feature yet, so set the maximum value
|
||||
* to 2048.
|
||||
*/
|
||||
pip->inittypereply_mtucap = IB_MTU_2048;
|
||||
// HCAs ignore VLStallCount and HOQLife
|
||||
/* our mtu cap depends on whether 4K MTU enabled or not */
|
||||
pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
|
||||
/* HCAs ignore VLStallCount and HOQLife */
|
||||
/* pip->vlstallcnt_hoqlife; */
|
||||
pip->operationalvl_pei_peo_fpi_fpo = 0x10; /* OVLs = 1 */
|
||||
pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
|
||||
/* P_KeyViolations are counted by hardware. */
|
||||
pip->pkey_violations =
|
||||
cpu_to_be16((ipath_get_cr_errpkey(dev->dd) -
|
||||
cpu_to_be16((ipath_get_cr_errpkey(dd) -
|
||||
dev->z_pkey_violations) & 0xFFFF);
|
||||
pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
|
||||
/* Only the hardware GUID is supported for now */
|
||||
@ -313,10 +323,17 @@ static int recv_subn_get_portinfo(struct ib_smp *smp,
|
||||
/* 32.768 usec. response time (guessing) */
|
||||
pip->resv_resptimevalue = 3;
|
||||
pip->localphyerrors_overrunerrors =
|
||||
(get_phyerrthreshold(dev->dd) << 4) |
|
||||
get_overrunthreshold(dev->dd);
|
||||
(get_phyerrthreshold(dd) << 4) |
|
||||
get_overrunthreshold(dd);
|
||||
/* pip->max_credit_hint; */
|
||||
/* pip->link_roundtrip_latency[3]; */
|
||||
if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
|
||||
u32 v;
|
||||
|
||||
v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
|
||||
pip->link_roundtrip_latency[0] = v >> 16;
|
||||
pip->link_roundtrip_latency[1] = v >> 8;
|
||||
pip->link_roundtrip_latency[2] = v;
|
||||
}
|
||||
|
||||
ret = reply(smp);
|
||||
|
||||
@ -444,19 +461,25 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
|
||||
ib_dispatch_event(&event);
|
||||
}
|
||||
|
||||
/* Only 4x supported but allow 1x or 4x to be set (see 14.2.6.6). */
|
||||
/* Allow 1x or 4x to be set (see 14.2.6.6). */
|
||||
lwe = pip->link_width_enabled;
|
||||
if ((lwe >= 4 && lwe <= 8) || (lwe >= 0xC && lwe <= 0xFE))
|
||||
goto err;
|
||||
if (lwe == 0xFF)
|
||||
dev->link_width_enabled = 3; /* 1x or 4x */
|
||||
else if (lwe)
|
||||
dev->link_width_enabled = lwe;
|
||||
if (lwe) {
|
||||
if (lwe == 0xFF)
|
||||
lwe = dd->ipath_link_width_supported;
|
||||
else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
|
||||
goto err;
|
||||
set_link_width_enabled(dd, lwe);
|
||||
}
|
||||
|
||||
/* Only 2.5 Gbs supported. */
|
||||
/* Allow 2.5 or 5.0 Gbs. */
|
||||
lse = pip->linkspeedactive_enabled & 0xF;
|
||||
if (lse >= 2 && lse <= 0xE)
|
||||
goto err;
|
||||
if (lse) {
|
||||
if (lse == 15)
|
||||
lse = dd->ipath_link_speed_supported;
|
||||
else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
|
||||
goto err;
|
||||
set_link_speed_enabled(dd, lse);
|
||||
}
|
||||
|
||||
/* Set link down default state. */
|
||||
switch (pip->portphysstate_linkdown & 0xF) {
|
||||
@ -491,6 +514,8 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
|
||||
mtu = 2048;
|
||||
break;
|
||||
case IB_MTU_4096:
|
||||
if (!ipath_mtu4096)
|
||||
goto err;
|
||||
mtu = 4096;
|
||||
break;
|
||||
default:
|
||||
@ -565,6 +590,10 @@ static int recv_subn_set_portinfo(struct ib_smp *smp,
|
||||
else
|
||||
goto err;
|
||||
ipath_set_linkstate(dd, lstate);
|
||||
if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
|
||||
ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
|
||||
goto done;
|
||||
}
|
||||
ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
|
||||
IPATH_LINKACTIVE, 1000);
|
||||
break;
|
||||
@ -948,10 +977,14 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
* nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
* intervals are counted in ticks. Since we use Linux timers, that
* count in jiffies, we can't sample for less than 1000 ticks if HZ
* == 1000 (4000 ticks if HZ is 250).
* == 1000 (4000 ticks if HZ is 250). link_speed_active returns 2 for
* DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
* have hardware support for delaying packets.
*/
/* XXX This is WRONG. */
p->tick = 250; /* 1 usec. */
if (crp->cr_psstat)
p->tick = dev->dd->ipath_link_speed_active - 1;
else
p->tick = 250; /* 1 usec. */
p->counter_width = 4; /* 32 bit counters */
p->counter_mask0_9 = COUNTER_MASK0_9;
spin_lock_irqsave(&dev->pending_lock, flags);
@ -1364,7 +1397,8 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
}

/* Is the mkey in the process of expiring? */
if (dev->mkey_lease_timeout && jiffies >= dev->mkey_lease_timeout) {
if (dev->mkey_lease_timeout &&
time_after_eq(jiffies, dev->mkey_lease_timeout)) {
/* Clear timeout and mkey protection field. */
dev->mkey_lease_timeout = 0;
dev->mkeyprot = 0;

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -340,6 +340,7 @@ static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
|
||||
qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
|
||||
qp->s_hdrwords = 0;
|
||||
qp->s_wqe = NULL;
|
||||
qp->s_pkt_delay = 0;
|
||||
qp->s_psn = 0;
|
||||
qp->r_psn = 0;
|
||||
qp->r_msn = 0;
|
||||
@ -392,7 +393,6 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
|
||||
qp->ibqp.qp_num, qp->remote_qpn, err);
|
||||
|
||||
spin_lock(&dev->pending_lock);
|
||||
/* XXX What if its already removed by the timeout code? */
|
||||
if (!list_empty(&qp->timerwait))
|
||||
list_del_init(&qp->timerwait);
|
||||
if (!list_empty(&qp->piowait))
|
||||
@ -516,13 +516,13 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
goto inval;
|
||||
|
||||
/*
|
||||
* Note: the chips support a maximum MTU of 4096, but the driver
|
||||
* hasn't implemented this feature yet, so don't allow Path MTU
|
||||
* values greater than 2048.
|
||||
* don't allow invalid Path MTU values or greater than 2048
|
||||
* unless we are configured for a 4KB MTU
|
||||
*/
|
||||
if (attr_mask & IB_QP_PATH_MTU)
|
||||
if (attr->path_mtu > IB_MTU_2048)
|
||||
goto inval;
|
||||
if ((attr_mask & IB_QP_PATH_MTU) &&
|
||||
(ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
|
||||
(attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
|
||||
goto inval;
|
||||
|
||||
if (attr_mask & IB_QP_PATH_MIG_STATE)
|
||||
if (attr->path_mig_state != IB_MIG_MIGRATED &&
|
||||
@ -564,8 +564,10 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
if (attr_mask & IB_QP_ACCESS_FLAGS)
|
||||
qp->qp_access_flags = attr->qp_access_flags;
|
||||
|
||||
if (attr_mask & IB_QP_AV)
|
||||
if (attr_mask & IB_QP_AV) {
|
||||
qp->remote_ah_attr = attr->ah_attr;
|
||||
qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
|
||||
}
|
||||
|
||||
if (attr_mask & IB_QP_PATH_MTU)
|
||||
qp->path_mtu = attr->path_mtu;
|
||||
@ -748,22 +750,33 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
|
||||
size_t sz;
|
||||
struct ib_qp *ret;
|
||||
|
||||
if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
|
||||
init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
|
||||
init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
|
||||
init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
|
||||
ret = ERR_PTR(-ENOMEM);
|
||||
if (init_attr->create_flags) {
|
||||
ret = ERR_PTR(-EINVAL);
|
||||
goto bail;
|
||||
}
|
||||
|
||||
if (init_attr->cap.max_send_sge +
|
||||
init_attr->cap.max_recv_sge +
|
||||
init_attr->cap.max_send_wr +
|
||||
init_attr->cap.max_recv_wr == 0) {
|
||||
if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
|
||||
init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
|
||||
ret = ERR_PTR(-EINVAL);
|
||||
goto bail;
|
||||
}
|
||||
|
||||
/* Check receive queue parameters if no SRQ is specified. */
|
||||
if (!init_attr->srq) {
|
||||
if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
|
||||
init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
|
||||
ret = ERR_PTR(-EINVAL);
|
||||
goto bail;
|
||||
}
|
||||
if (init_attr->cap.max_send_sge +
|
||||
init_attr->cap.max_send_wr +
|
||||
init_attr->cap.max_recv_sge +
|
||||
init_attr->cap.max_recv_wr == 0) {
|
||||
ret = ERR_PTR(-EINVAL);
|
||||
goto bail;
|
||||
}
|
||||
}
|
||||
|
||||
switch (init_attr->qp_type) {
|
||||
case IB_QPT_UC:
|
||||
case IB_QPT_RC:
|
||||
@ -840,6 +853,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
|
||||
goto bail_qp;
|
||||
}
|
||||
qp->ip = NULL;
|
||||
qp->s_tx = NULL;
|
||||
ipath_reset_qp(qp, init_attr->qp_type);
|
||||
break;
|
||||
|
||||
@ -945,12 +959,20 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
|
||||
/* Stop the sending tasklet. */
|
||||
tasklet_kill(&qp->s_task);
|
||||
|
||||
if (qp->s_tx) {
|
||||
atomic_dec(&qp->refcount);
|
||||
if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
|
||||
kfree(qp->s_tx->txreq.map_addr);
|
||||
}
|
||||
|
||||
/* Make sure the QP isn't on the timeout list. */
|
||||
spin_lock_irqsave(&dev->pending_lock, flags);
|
||||
if (!list_empty(&qp->timerwait))
|
||||
list_del_init(&qp->timerwait);
|
||||
if (!list_empty(&qp->piowait))
|
||||
list_del_init(&qp->piowait);
|
||||
if (qp->s_tx)
|
||||
list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
|
||||
spin_unlock_irqrestore(&dev->pending_lock, flags);
|
||||
|
||||
/*
|
||||
@ -1021,7 +1043,6 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
|
||||
qp->ibqp.qp_num, qp->remote_qpn, wc->status);
|
||||
|
||||
spin_lock(&dev->pending_lock);
|
||||
/* XXX What if its already removed by the timeout code? */
|
||||
if (!list_empty(&qp->timerwait))
|
||||
list_del_init(&qp->timerwait);
|
||||
if (!list_empty(&qp->piowait))
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -31,6 +31,8 @@
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
|
||||
#include "ipath_verbs.h"
|
||||
#include "ipath_kernel.h"
|
||||
|
||||
@ -306,7 +308,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
|
||||
else {
|
||||
qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
|
||||
/* Immediate data comes after the BTH */
|
||||
ohdr->u.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.imm_data = wqe->wr.ex.imm_data;
|
||||
hwords += 1;
|
||||
}
|
||||
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
|
||||
@ -344,7 +346,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
|
||||
qp->s_state =
|
||||
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
|
||||
/* Immediate data comes after RETH */
|
||||
ohdr->u.rc.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
|
||||
hwords += 1;
|
||||
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
|
||||
bth0 |= 1 << 23;
|
||||
@ -488,7 +490,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
|
||||
else {
|
||||
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
|
||||
/* Immediate data comes after the BTH */
|
||||
ohdr->u.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.imm_data = wqe->wr.ex.imm_data;
|
||||
hwords += 1;
|
||||
}
|
||||
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
|
||||
@ -524,7 +526,7 @@ int ipath_make_rc_req(struct ipath_qp *qp)
|
||||
else {
|
||||
qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
|
||||
/* Immediate data comes after the BTH */
|
||||
ohdr->u.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.imm_data = wqe->wr.ex.imm_data;
|
||||
hwords += 1;
|
||||
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
|
||||
bth0 |= 1 << 23;
|
||||
@ -585,19 +587,39 @@ bail:
|
||||
static void send_rc_ack(struct ipath_qp *qp)
|
||||
{
|
||||
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
|
||||
struct ipath_devdata *dd;
|
||||
u16 lrh0;
|
||||
u32 bth0;
|
||||
u32 hwords;
|
||||
u32 __iomem *piobuf;
|
||||
struct ipath_ib_header hdr;
|
||||
struct ipath_other_headers *ohdr;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
|
||||
/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
|
||||
if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
|
||||
(qp->s_flags & IPATH_S_ACK_PENDING) ||
|
||||
qp->s_ack_state != OP(ACKNOWLEDGE))
|
||||
goto queue_ack;
|
||||
|
||||
spin_unlock_irqrestore(&qp->s_lock, flags);
|
||||
|
||||
dd = dev->dd;
|
||||
piobuf = ipath_getpiobuf(dd, 0, NULL);
|
||||
if (!piobuf) {
|
||||
/*
|
||||
* We are out of PIO buffers at the moment.
|
||||
* Pass responsibility for sending the ACK to the
|
||||
* send tasklet so that when a PIO buffer becomes
|
||||
* available, the ACK is sent ahead of other outgoing
|
||||
* packets.
|
||||
*/
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
goto queue_ack;
|
||||
}
|
||||
|
||||
/* Construct the header. */
|
||||
ohdr = &hdr.u.oth;
|
||||
lrh0 = IPATH_LRH_BTH;
|
||||
@ -611,7 +633,7 @@ static void send_rc_ack(struct ipath_qp *qp)
|
||||
lrh0 = IPATH_LRH_GRH;
|
||||
}
|
||||
/* read pkey_index w/o lock (its atomic) */
|
||||
bth0 = ipath_get_pkey(dev->dd, qp->s_pkey_index) |
|
||||
bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
|
||||
(OP(ACKNOWLEDGE) << 24) | (1 << 22);
|
||||
if (qp->r_nak_state)
|
||||
ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
|
||||
@ -623,30 +645,29 @@ static void send_rc_ack(struct ipath_qp *qp)
|
||||
hdr.lrh[0] = cpu_to_be16(lrh0);
|
||||
hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
|
||||
hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
|
||||
hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
|
||||
hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
|
||||
ohdr->bth[0] = cpu_to_be32(bth0);
|
||||
ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
|
||||
ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
|
||||
|
||||
/*
|
||||
* If we can send the ACK, clear the ACK state.
|
||||
*/
|
||||
if (ipath_verbs_send(qp, &hdr, hwords, NULL, 0) == 0) {
|
||||
dev->n_unicast_xmit++;
|
||||
goto done;
|
||||
}
|
||||
writeq(hwords + 1, piobuf);
|
||||
|
||||
/*
|
||||
* We are out of PIO buffers at the moment.
|
||||
* Pass responsibility for sending the ACK to the
|
||||
* send tasklet so that when a PIO buffer becomes
|
||||
* available, the ACK is sent ahead of other outgoing
|
||||
* packets.
|
||||
*/
|
||||
dev->n_rc_qacks++;
|
||||
if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
|
||||
u32 *hdrp = (u32 *) &hdr;
|
||||
|
||||
ipath_flush_wc();
|
||||
__iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
|
||||
ipath_flush_wc();
|
||||
__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
|
||||
} else
|
||||
__iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
|
||||
|
||||
ipath_flush_wc();
|
||||
|
||||
dev->n_unicast_xmit++;
|
||||
goto done;
|
||||
|
||||
queue_ack:
|
||||
spin_lock_irqsave(&qp->s_lock, flags);
|
||||
dev->n_rc_qacks++;
|
||||
qp->s_flags |= IPATH_S_ACK_PENDING;
|
||||
qp->s_nak_state = qp->r_nak_state;
|
||||
|
@ -63,67 +63,92 @@
|
||||
/* kr_control bits */
|
||||
#define INFINIPATH_C_FREEZEMODE 0x00000002
|
||||
#define INFINIPATH_C_LINKENABLE 0x00000004
|
||||
#define INFINIPATH_C_RESET 0x00000001
|
||||
|
||||
/* kr_sendctrl bits */
|
||||
#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
|
||||
#define INFINIPATH_S_UPDTHRESH_SHIFT 24
|
||||
#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
|
||||
|
||||
#define IPATH_S_ABORT 0
|
||||
#define IPATH_S_PIOINTBUFAVAIL 1
|
||||
#define IPATH_S_PIOBUFAVAILUPD 2
|
||||
#define IPATH_S_PIOENABLE 3
|
||||
#define IPATH_S_SDMAINTENABLE 9
|
||||
#define IPATH_S_SDMASINGLEDESCRIPTOR 10
|
||||
#define IPATH_S_SDMAENABLE 11
|
||||
#define IPATH_S_SDMAHALT 12
|
||||
#define IPATH_S_DISARM 31
|
||||
|
||||
#define INFINIPATH_S_ABORT (1U << IPATH_S_ABORT)
|
||||
#define INFINIPATH_S_PIOINTBUFAVAIL (1U << IPATH_S_PIOINTBUFAVAIL)
|
||||
#define INFINIPATH_S_PIOBUFAVAILUPD (1U << IPATH_S_PIOBUFAVAILUPD)
|
||||
#define INFINIPATH_S_PIOENABLE (1U << IPATH_S_PIOENABLE)
|
||||
#define INFINIPATH_S_SDMAINTENABLE (1U << IPATH_S_SDMAINTENABLE)
|
||||
#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
|
||||
(1U << IPATH_S_SDMASINGLEDESCRIPTOR)
|
||||
#define INFINIPATH_S_SDMAENABLE (1U << IPATH_S_SDMAENABLE)
|
||||
#define INFINIPATH_S_SDMAHALT (1U << IPATH_S_SDMAHALT)
|
||||
#define INFINIPATH_S_DISARM (1U << IPATH_S_DISARM)
|
||||
|
||||
/* kr_rcvctrl bits */
|
||||
/* kr_rcvctrl bits that are the same on multiple chips */
|
||||
#define INFINIPATH_R_PORTENABLE_SHIFT 0
|
||||
#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
|
||||
|
||||
/* kr_intstatus, kr_intclear, kr_intmask bits */
|
||||
#define INFINIPATH_I_RCVURG_SHIFT 0
|
||||
#define INFINIPATH_I_RCVAVAIL_SHIFT 12
|
||||
#define INFINIPATH_I_ERROR 0x80000000
|
||||
#define INFINIPATH_I_SPIOSENT 0x40000000
|
||||
#define INFINIPATH_I_SPIOBUFAVAIL 0x20000000
|
||||
#define INFINIPATH_I_GPIO 0x10000000
|
||||
#define INFINIPATH_I_SDMAINT 0x8000000000000000ULL
|
||||
#define INFINIPATH_I_SDMADISABLED 0x4000000000000000ULL
|
||||
#define INFINIPATH_I_ERROR 0x0000000080000000ULL
|
||||
#define INFINIPATH_I_SPIOSENT 0x0000000040000000ULL
|
||||
#define INFINIPATH_I_SPIOBUFAVAIL 0x0000000020000000ULL
|
||||
#define INFINIPATH_I_GPIO 0x0000000010000000ULL
|
||||
#define INFINIPATH_I_JINT 0x0000000004000000ULL
|
||||
|
||||
/* kr_errorstatus, kr_errorclear, kr_errormask bits */
|
||||
#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL
|
||||
#define INFINIPATH_E_RVCRC 0x0000000000000002ULL
|
||||
#define INFINIPATH_E_RICRC 0x0000000000000004ULL
|
||||
#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL
|
||||
#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL
|
||||
#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL
|
||||
#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL
|
||||
#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL
|
||||
#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL
|
||||
#define INFINIPATH_E_REBP 0x0000000000000200ULL
|
||||
#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL
|
||||
#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL
|
||||
#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL
|
||||
#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL
|
||||
#define INFINIPATH_E_RBADTID 0x0000000000004000ULL
|
||||
#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL
|
||||
#define INFINIPATH_E_RHDR 0x0000000000010000ULL
|
||||
#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL
|
||||
#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL
|
||||
#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL
|
||||
#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL
|
||||
#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL
|
||||
#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL
|
||||
#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
|
||||
#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL
|
||||
#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
|
||||
#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL
|
||||
#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
|
||||
#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL
|
||||
#define INFINIPATH_E_RESET 0x0004000000000000ULL
|
||||
#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
|
||||
#define INFINIPATH_E_RFORMATERR 0x0000000000000001ULL
|
||||
#define INFINIPATH_E_RVCRC 0x0000000000000002ULL
|
||||
#define INFINIPATH_E_RICRC 0x0000000000000004ULL
|
||||
#define INFINIPATH_E_RMINPKTLEN 0x0000000000000008ULL
|
||||
#define INFINIPATH_E_RMAXPKTLEN 0x0000000000000010ULL
|
||||
#define INFINIPATH_E_RLONGPKTLEN 0x0000000000000020ULL
|
||||
#define INFINIPATH_E_RSHORTPKTLEN 0x0000000000000040ULL
|
||||
#define INFINIPATH_E_RUNEXPCHAR 0x0000000000000080ULL
|
||||
#define INFINIPATH_E_RUNSUPVL 0x0000000000000100ULL
|
||||
#define INFINIPATH_E_REBP 0x0000000000000200ULL
|
||||
#define INFINIPATH_E_RIBFLOW 0x0000000000000400ULL
|
||||
#define INFINIPATH_E_RBADVERSION 0x0000000000000800ULL
|
||||
#define INFINIPATH_E_RRCVEGRFULL 0x0000000000001000ULL
|
||||
#define INFINIPATH_E_RRCVHDRFULL 0x0000000000002000ULL
|
||||
#define INFINIPATH_E_RBADTID 0x0000000000004000ULL
|
||||
#define INFINIPATH_E_RHDRLEN 0x0000000000008000ULL
|
||||
#define INFINIPATH_E_RHDR 0x0000000000010000ULL
|
||||
#define INFINIPATH_E_RIBLOSTLINK 0x0000000000020000ULL
|
||||
#define INFINIPATH_E_SENDSPECIALTRIGGER 0x0000000008000000ULL
|
||||
#define INFINIPATH_E_SDMADISABLED 0x0000000010000000ULL
|
||||
#define INFINIPATH_E_SMINPKTLEN 0x0000000020000000ULL
|
||||
#define INFINIPATH_E_SMAXPKTLEN 0x0000000040000000ULL
|
||||
#define INFINIPATH_E_SUNDERRUN 0x0000000080000000ULL
|
||||
#define INFINIPATH_E_SPKTLEN 0x0000000100000000ULL
|
||||
#define INFINIPATH_E_SDROPPEDSMPPKT 0x0000000200000000ULL
|
||||
#define INFINIPATH_E_SDROPPEDDATAPKT 0x0000000400000000ULL
|
||||
#define INFINIPATH_E_SPIOARMLAUNCH 0x0000000800000000ULL
|
||||
#define INFINIPATH_E_SUNEXPERRPKTNUM 0x0000001000000000ULL
|
||||
#define INFINIPATH_E_SUNSUPVL 0x0000002000000000ULL
|
||||
#define INFINIPATH_E_SENDBUFMISUSE 0x0000004000000000ULL
|
||||
#define INFINIPATH_E_SDMAGENMISMATCH 0x0000008000000000ULL
|
||||
#define INFINIPATH_E_SDMAOUTOFBOUND 0x0000010000000000ULL
|
||||
#define INFINIPATH_E_SDMATAILOUTOFBOUND 0x0000020000000000ULL
|
||||
#define INFINIPATH_E_SDMABASE 0x0000040000000000ULL
|
||||
#define INFINIPATH_E_SDMA1STDESC 0x0000080000000000ULL
|
||||
#define INFINIPATH_E_SDMARPYTAG 0x0000100000000000ULL
|
||||
#define INFINIPATH_E_SDMADWEN 0x0000200000000000ULL
|
||||
#define INFINIPATH_E_SDMAMISSINGDW 0x0000400000000000ULL
|
||||
#define INFINIPATH_E_SDMAUNEXPDATA 0x0000800000000000ULL
|
||||
#define INFINIPATH_E_IBSTATUSCHANGED 0x0001000000000000ULL
|
||||
#define INFINIPATH_E_INVALIDADDR 0x0002000000000000ULL
|
||||
#define INFINIPATH_E_RESET 0x0004000000000000ULL
|
||||
#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
|
||||
#define INFINIPATH_E_SDMADESCADDRMISALIGN 0x0010000000000000ULL
|
||||
#define INFINIPATH_E_INVALIDEEPCMD 0x0020000000000000ULL
|
||||
|
||||
/*
|
||||
* this is used to print "common" packet errors only when the
|
||||
@ -134,6 +159,17 @@
|
||||
| INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
|
||||
| INFINIPATH_E_REBP )
|
||||
|
||||
/* Convenience for decoding Send DMA errors */
|
||||
#define INFINIPATH_E_SDMAERRS ( \
|
||||
INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
|
||||
INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
|
||||
INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
|
||||
INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
|
||||
INFINIPATH_E_SDMAUNEXPDATA | \
|
||||
INFINIPATH_E_SDMADESCADDRMISALIGN | \
|
||||
INFINIPATH_E_SDMADISABLED | \
|
||||
INFINIPATH_E_SENDBUFMISUSE)
|
||||
|
||||
/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
|
||||
/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
|
||||
* RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID
|
||||
@ -158,7 +194,7 @@
|
||||
#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL
|
||||
/* waldo specific -- find the rest in ipath_6110.c */
|
||||
#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR 0x0000000400000000ULL
|
||||
/* monty specific -- find the rest in ipath_6120.c */
|
||||
/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
|
||||
#define INFINIPATH_HWE_MEMBISTFAILED 0x0040000000000000ULL
|
||||
|
||||
/* kr_hwdiagctrl bits */
|
||||
@ -185,8 +221,8 @@
|
||||
#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
|
||||
#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
|
||||
#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
|
||||
#define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
|
||||
#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
|
||||
#define INFINIPATH_IBCC_LINKCMD_DOWN 1 /* move to 0x11 */
|
||||
#define INFINIPATH_IBCC_LINKCMD_ARMED 2 /* move to 0x21 */
|
||||
#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
|
||||
#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
|
||||
#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
|
||||
@ -201,10 +237,9 @@
|
||||
#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
|
||||
|
||||
/* kr_ibcstatus bits */
|
||||
#define INFINIPATH_IBCS_LINKTRAININGSTATE_MASK 0xF
|
||||
#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
|
||||
#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
|
||||
#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4
|
||||
|
||||
#define INFINIPATH_IBCS_TXREADY 0x40000000
|
||||
#define INFINIPATH_IBCS_TXCREDITOK 0x80000000
|
||||
/* link training states (shift by
|
||||
@ -222,30 +257,13 @@
|
||||
#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN 0x0c
|
||||
#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT 0x0e
|
||||
#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE 0x0f
|
||||
/* link state machine states (shift by INFINIPATH_IBCS_LINKSTATE_SHIFT) */
|
||||
/* link state machine states (shift by ibcs_ls_shift) */
|
||||
#define INFINIPATH_IBCS_L_STATE_DOWN 0x0
|
||||
#define INFINIPATH_IBCS_L_STATE_INIT 0x1
|
||||
#define INFINIPATH_IBCS_L_STATE_ARM 0x2
|
||||
#define INFINIPATH_IBCS_L_STATE_ACTIVE 0x3
|
||||
#define INFINIPATH_IBCS_L_STATE_ACT_DEFER 0x4
|
||||
|
||||
/* combination link status states that we use with some frequency */
#define IPATH_IBSTATE_MASK ((INFINIPATH_IBCS_LINKTRAININGSTATE_MASK \
<< INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | \
(INFINIPATH_IBCS_LINKSTATE_MASK \
<<INFINIPATH_IBCS_LINKSTATE_SHIFT))
#define IPATH_IBSTATE_INIT ((INFINIPATH_IBCS_L_STATE_INIT \
<< INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
(INFINIPATH_IBCS_LT_STATE_LINKUP \
<<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
#define IPATH_IBSTATE_ARM ((INFINIPATH_IBCS_L_STATE_ARM \
<< INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
(INFINIPATH_IBCS_LT_STATE_LINKUP \
<<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))
#define IPATH_IBSTATE_ACTIVE ((INFINIPATH_IBCS_L_STATE_ACTIVE \
<< INFINIPATH_IBCS_LINKSTATE_SHIFT) | \
(INFINIPATH_IBCS_LT_STATE_LINKUP \
<<INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT))

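Aside, not part of the patch: these combined values are meant to be compared against the kr_ibcstatus readout after masking with IPATH_IBSTATE_MASK. A self-contained sketch of that pattern; the numeric constants below are placeholders, since the real values are defined elsewhere in this header:

#include <stdint.h>
#include <stdio.h>

#define LT_SHIFT  0		/* link training state field */
#define LT_MASK   0xFULL
#define LS_SHIFT  4		/* link state field */
#define LS_MASK   0x7ULL
#define LS_ACTIVE 0x3ULL
#define LT_LINKUP 0xBULL	/* placeholder training-state value */

#define IBSTATE_MASK   ((LT_MASK << LT_SHIFT) | (LS_MASK << LS_SHIFT))
#define IBSTATE_ACTIVE ((LS_ACTIVE << LS_SHIFT) | (LT_LINKUP << LT_SHIFT))

int main(void)
{
	uint64_t ibcs = IBSTATE_ACTIVE;	/* pretend kr_ibcstatus readout */

	if ((ibcs & IBSTATE_MASK) == IBSTATE_ACTIVE)
		printf("link is ACTIVE\n");
	return 0;
}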
/* kr_extstatus bits */
|
||||
#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
|
||||
@ -286,8 +304,7 @@
|
||||
/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
|
||||
#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
|
||||
|
||||
/* kr_xgxsconfig bits */
|
||||
#define INFINIPATH_XGXS_RESET 0x7ULL
|
||||
/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
|
||||
#define INFINIPATH_XGXS_RX_POL_SHIFT 19
|
||||
#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
|
||||
|
||||
@ -417,6 +434,29 @@ struct ipath_kregs {
|
||||
ipath_kreg kr_pcieq1serdesconfig0;
|
||||
ipath_kreg kr_pcieq1serdesconfig1;
|
||||
ipath_kreg kr_pcieq1serdesstatus;
|
||||
ipath_kreg kr_hrtbt_guid;
|
||||
ipath_kreg kr_ibcddrctrl;
|
||||
ipath_kreg kr_ibcddrstatus;
|
||||
ipath_kreg kr_jintreload;
|
||||
|
||||
/* send dma related regs */
|
||||
ipath_kreg kr_senddmabase;
|
||||
ipath_kreg kr_senddmalengen;
|
||||
ipath_kreg kr_senddmatail;
|
||||
ipath_kreg kr_senddmahead;
|
||||
ipath_kreg kr_senddmaheadaddr;
|
||||
ipath_kreg kr_senddmabufmask0;
|
||||
ipath_kreg kr_senddmabufmask1;
|
||||
ipath_kreg kr_senddmabufmask2;
|
||||
ipath_kreg kr_senddmastatus;
|
||||
|
||||
/* SerDes related regs (IBA7220-only) */
|
||||
ipath_kreg kr_ibserdesctrl;
|
||||
ipath_kreg kr_ib_epbacc;
|
||||
ipath_kreg kr_ib_epbtrans;
|
||||
ipath_kreg kr_pcie_epbacc;
|
||||
ipath_kreg kr_pcie_epbtrans;
|
||||
ipath_kreg kr_ib_ddsrxeq;
|
||||
};
|
||||
|
||||
struct ipath_cregs {
|
||||
|
@ -310,7 +310,7 @@ again:
|
||||
switch (wqe->wr.opcode) {
|
||||
case IB_WR_SEND_WITH_IMM:
|
||||
wc.wc_flags = IB_WC_WITH_IMM;
|
||||
wc.imm_data = wqe->wr.imm_data;
|
||||
wc.imm_data = wqe->wr.ex.imm_data;
|
||||
/* FALLTHROUGH */
|
||||
case IB_WR_SEND:
|
||||
if (!ipath_get_rwqe(qp, 0)) {
|
||||
@ -339,7 +339,7 @@ again:
|
||||
goto err;
|
||||
}
|
||||
wc.wc_flags = IB_WC_WITH_IMM;
|
||||
wc.imm_data = wqe->wr.imm_data;
|
||||
wc.imm_data = wqe->wr.ex.imm_data;
|
||||
if (!ipath_get_rwqe(qp, 1))
|
||||
goto rnr_nak;
|
||||
/* FALLTHROUGH */
|
||||
@ -483,14 +483,16 @@ done:
|
||||
|
||||
static void want_buffer(struct ipath_devdata *dd)
|
||||
{
|
||||
unsigned long flags;
|
||||
if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA)) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
 1462 drivers/infiniband/hw/ipath/ipath_sd7220.c (new file; diff suppressed because it is too large)
 1082 drivers/infiniband/hw/ipath/ipath_sd7220_img.c (new file; diff suppressed because it is too large)
  790 drivers/infiniband/hw/ipath/ipath_sdma.c (new file)
@ -0,0 +1,790 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include "ipath_kernel.h"
|
||||
#include "ipath_verbs.h"
|
||||
#include "ipath_common.h"
|
||||
|
||||
#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
|
||||
|
||||
static void vl15_watchdog_enq(struct ipath_devdata *dd)
|
||||
{
|
||||
/* ipath_sdma_lock must already be held */
|
||||
if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
|
||||
unsigned long interval = (HZ + 19) / 20;
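/* illustrative note, not in the patch: (HZ + 19) / 20 jiffies rounds up
 * to one twentieth of a second, i.e. a ~50 ms watchdog interval */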
|
||||
dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
|
||||
add_timer(&dd->ipath_sdma_vl15_timer);
|
||||
}
|
||||
}
|
||||
|
||||
static void vl15_watchdog_deq(struct ipath_devdata *dd)
|
||||
{
|
||||
/* ipath_sdma_lock must already be held */
|
||||
if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
|
||||
unsigned long interval = (HZ + 19) / 20;
|
||||
mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
|
||||
} else {
|
||||
del_timer(&dd->ipath_sdma_vl15_timer);
|
||||
}
|
||||
}
|
||||
|
||||
static void vl15_watchdog_timeout(unsigned long opaque)
|
||||
{
|
||||
struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
|
||||
|
||||
if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
|
||||
ipath_dbg("vl15 watchdog timeout - clearing\n");
|
||||
ipath_cancel_sends(dd, 1);
|
||||
ipath_hol_down(dd);
|
||||
} else {
|
||||
ipath_dbg("vl15 watchdog timeout - "
|
||||
"condition already cleared\n");
|
||||
}
|
||||
}
|
||||
|
||||
static void unmap_desc(struct ipath_devdata *dd, unsigned head)
|
||||
{
|
||||
__le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
|
||||
u64 desc[2];
|
||||
dma_addr_t addr;
|
||||
size_t len;
|
||||
|
||||
desc[0] = le64_to_cpu(descqp[0]);
|
||||
desc[1] = le64_to_cpu(descqp[1]);
|
||||
|
||||
addr = (desc[1] << 32) | (desc[0] >> 32);
|
||||
len = (desc[0] >> 14) & (0x7ffULL << 2);
|
||||
dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
/*
|
||||
* ipath_sdma_lock should be locked before calling this.
|
||||
*/
|
||||
int ipath_sdma_make_progress(struct ipath_devdata *dd)
|
||||
{
|
||||
struct list_head *lp = NULL;
|
||||
struct ipath_sdma_txreq *txp = NULL;
|
||||
u16 dmahead;
|
||||
u16 start_idx = 0;
|
||||
int progress = 0;
|
||||
|
||||
if (!list_empty(&dd->ipath_sdma_activelist)) {
|
||||
lp = dd->ipath_sdma_activelist.next;
|
||||
txp = list_entry(lp, struct ipath_sdma_txreq, list);
|
||||
start_idx = txp->start_idx;
|
||||
}
|
||||
|
||||
/*
|
||||
* Read the SDMA head register in order to know that the
|
||||
* interrupt clear has been written to the chip.
|
||||
* Otherwise, we may not get an interrupt for the last
|
||||
* descriptor in the queue.
|
||||
*/
|
||||
dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
|
||||
/* sanity check return value for error handling (chip reset, etc.) */
|
||||
if (dmahead >= dd->ipath_sdma_descq_cnt)
|
||||
goto done;
|
||||
|
||||
while (dd->ipath_sdma_descq_head != dmahead) {
|
||||
if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
|
||||
dd->ipath_sdma_descq_head == start_idx) {
|
||||
unmap_desc(dd, dd->ipath_sdma_descq_head);
|
||||
start_idx++;
|
||||
if (start_idx == dd->ipath_sdma_descq_cnt)
|
||||
start_idx = 0;
|
||||
}
|
||||
|
||||
/* increment free count and head */
|
||||
dd->ipath_sdma_descq_removed++;
|
||||
if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
|
||||
dd->ipath_sdma_descq_head = 0;
|
||||
|
||||
if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
|
||||
/* move to notify list */
|
||||
if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
|
||||
vl15_watchdog_deq(dd);
|
||||
list_move_tail(lp, &dd->ipath_sdma_notifylist);
|
||||
if (!list_empty(&dd->ipath_sdma_activelist)) {
|
||||
lp = dd->ipath_sdma_activelist.next;
|
||||
txp = list_entry(lp, struct ipath_sdma_txreq,
|
||||
list);
|
||||
start_idx = txp->start_idx;
|
||||
} else {
|
||||
lp = NULL;
|
||||
txp = NULL;
|
||||
}
|
||||
}
|
||||
progress = 1;
|
||||
}
|
||||
|
||||
if (progress)
|
||||
tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
|
||||
|
||||
done:
|
||||
return progress;
|
||||
}
|
||||
|
||||
static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
|
||||
{
|
||||
struct ipath_sdma_txreq *txp, *txp_next;
|
||||
|
||||
list_for_each_entry_safe(txp, txp_next, list, list) {
|
||||
list_del_init(&txp->list);
|
||||
|
||||
if (txp->callback)
|
||||
(*txp->callback)(txp->callback_cookie,
|
||||
txp->callback_status);
|
||||
}
|
||||
}
|
||||
|
||||
static void sdma_notify_taskbody(struct ipath_devdata *dd)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct list_head list;
|
||||
|
||||
INIT_LIST_HEAD(&list);
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
list_splice_init(&dd->ipath_sdma_notifylist, &list);
|
||||
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
ipath_sdma_notify(dd, &list);
|
||||
|
||||
/*
|
||||
* The IB verbs layer needs to see the callback before getting
|
||||
* the call to ipath_ib_piobufavail() because the callback
|
||||
* handles releasing resources the next send will need.
|
||||
* Otherwise, we could do these calls in
|
||||
* ipath_sdma_make_progress().
|
||||
*/
|
||||
ipath_ib_piobufavail(dd->verbs_dev);
|
||||
}
|
||||
|
||||
static void sdma_notify_task(unsigned long opaque)
|
||||
{
|
||||
struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
|
||||
|
||||
if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
|
||||
sdma_notify_taskbody(dd);
|
||||
}
|
||||
|
||||
static void dump_sdma_state(struct ipath_devdata *dd)
|
||||
{
|
||||
unsigned long reg;
|
||||
|
||||
reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
|
||||
ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
|
||||
|
||||
reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
|
||||
ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
|
||||
|
||||
reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
|
||||
ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
|
||||
|
||||
reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
|
||||
ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
|
||||
|
||||
reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
|
||||
ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
|
||||
|
||||
reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
|
||||
ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
|
||||
|
||||
reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
|
||||
ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
|
||||
}
|
||||
|
||||
static void sdma_abort_task(unsigned long opaque)
|
||||
{
|
||||
struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
|
||||
u64 status;
|
||||
unsigned long flags;
|
||||
|
||||
if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
|
||||
|
||||
/* nothing to do */
|
||||
if (status == IPATH_SDMA_ABORT_NONE)
|
||||
goto unlock;
|
||||
|
||||
/* ipath_sdma_abort() is done, waiting for interrupt */
|
||||
if (status == IPATH_SDMA_ABORT_DISARMED) {
|
||||
if (jiffies < dd->ipath_sdma_abort_intr_timeout)
|
||||
goto resched_noprint;
|
||||
/* give up, intr got lost somewhere */
|
||||
ipath_dbg("give up waiting for SDMADISABLED intr\n");
|
||||
__set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
|
||||
status = IPATH_SDMA_ABORT_ABORTED;
|
||||
}
|
||||
|
||||
/* everything is stopped, time to clean up and restart */
|
||||
if (status == IPATH_SDMA_ABORT_ABORTED) {
|
||||
struct ipath_sdma_txreq *txp, *txpnext;
|
||||
u64 hwstatus;
|
||||
int notify = 0;
|
||||
|
||||
hwstatus = ipath_read_kreg64(dd,
|
||||
dd->ipath_kregs->kr_senddmastatus);
|
||||
|
||||
if (/* ScoreBoardDrainInProg */
|
||||
test_bit(63, &hwstatus) ||
|
||||
/* AbortInProg */
|
||||
test_bit(62, &hwstatus) ||
|
||||
/* InternalSDmaEnable */
|
||||
test_bit(61, &hwstatus) ||
|
||||
/* ScbEmpty */
|
||||
!test_bit(30, &hwstatus)) {
|
||||
if (dd->ipath_sdma_reset_wait > 0) {
|
||||
/* not done shutting down sdma */
|
||||
--dd->ipath_sdma_reset_wait;
|
||||
goto resched;
|
||||
}
|
||||
ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
|
||||
"status after SDMA reset, continuing\n");
|
||||
dump_sdma_state(dd);
|
||||
}
|
||||
|
||||
/* dequeue all "sent" requests */
|
||||
list_for_each_entry_safe(txp, txpnext,
|
||||
&dd->ipath_sdma_activelist, list) {
|
||||
txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
|
||||
if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
|
||||
vl15_watchdog_deq(dd);
|
||||
list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
|
||||
notify = 1;
|
||||
}
|
||||
if (notify)
|
||||
tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
|
||||
|
||||
/* reset our notion of head and tail */
|
||||
dd->ipath_sdma_descq_tail = 0;
|
||||
dd->ipath_sdma_descq_head = 0;
|
||||
dd->ipath_sdma_head_dma[0] = 0;
|
||||
dd->ipath_sdma_generation = 0;
|
||||
dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
|
||||
|
||||
/* Reset SendDmaLenGen */
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
|
||||
(u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
|
||||
|
||||
/* done with sdma state for a bit */
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
/*
|
||||
* Don't restart sdma here. Wait until link is up to ACTIVE.
|
||||
* VL15 MADs used to bring the link up use PIO, and multiple
|
||||
* link transitions otherwise cause the sdma engine to be
|
||||
* stopped and started multiple times.
|
||||
* The disable is done here, including the shadow, so the
|
||||
* state is kept consistent.
|
||||
* See ipath_restart_sdma() for the actual starting of sdma.
|
||||
*/
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
|
||||
/* make sure I see next message */
|
||||
dd->ipath_sdma_abort_jiffies = 0;
|
||||
|
||||
goto done;
|
||||
}
|
||||
|
||||
resched:
|
||||
/*
|
||||
* for now, keep spinning
|
||||
* JAG - this is bad to just have default be a loop without
|
||||
* state change
|
||||
*/
|
||||
if (jiffies > dd->ipath_sdma_abort_jiffies) {
|
||||
ipath_dbg("looping with status 0x%016llx\n",
|
||||
dd->ipath_sdma_status);
|
||||
dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
|
||||
}
|
||||
resched_noprint:
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
|
||||
tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
|
||||
return;
|
||||
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
done:
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called from interrupt context.
|
||||
*/
|
||||
void ipath_sdma_intr(struct ipath_devdata *dd)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
(void) ipath_sdma_make_progress(dd);
|
||||
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
}
|
||||
|
||||
static int alloc_sdma(struct ipath_devdata *dd)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/* Allocate memory for SendDMA descriptor FIFO */
|
||||
dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
|
||||
SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
|
||||
|
||||
if (!dd->ipath_sdma_descq) {
|
||||
ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
|
||||
"FIFO memory\n");
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
dd->ipath_sdma_descq_cnt =
|
||||
SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
|
||||
|
||||
/* Allocate memory for DMA of head register to memory */
|
||||
dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
|
||||
PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
|
||||
if (!dd->ipath_sdma_head_dma) {
|
||||
ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
|
||||
ret = -ENOMEM;
|
||||
goto cleanup_descq;
|
||||
}
|
||||
dd->ipath_sdma_head_dma[0] = 0;
|
||||
|
||||
init_timer(&dd->ipath_sdma_vl15_timer);
|
||||
dd->ipath_sdma_vl15_timer.function = vl15_watchdog_timeout;
|
||||
dd->ipath_sdma_vl15_timer.data = (unsigned long)dd;
|
||||
atomic_set(&dd->ipath_sdma_vl15_count, 0);
|
||||
|
||||
goto done;
|
||||
|
||||
cleanup_descq:
|
||||
dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
|
||||
(void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
|
||||
dd->ipath_sdma_descq = NULL;
|
||||
dd->ipath_sdma_descq_phys = 0;
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int setup_sdma(struct ipath_devdata *dd)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned i, n;
|
||||
u64 tmp64;
|
||||
u64 senddmabufmask[3] = { 0 };
|
||||
unsigned long flags;
|
||||
|
||||
ret = alloc_sdma(dd);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
if (!dd->ipath_sdma_descq) {
|
||||
ipath_dev_err(dd, "SendDMA memory not allocated\n");
|
||||
goto done;
|
||||
}
|
||||
|
||||
dd->ipath_sdma_status = 0;
|
||||
dd->ipath_sdma_abort_jiffies = 0;
|
||||
dd->ipath_sdma_generation = 0;
|
||||
dd->ipath_sdma_descq_tail = 0;
|
||||
dd->ipath_sdma_descq_head = 0;
|
||||
dd->ipath_sdma_descq_removed = 0;
|
||||
dd->ipath_sdma_descq_added = 0;
|
||||
|
||||
/* Set SendDmaBase */
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
|
||||
dd->ipath_sdma_descq_phys);
|
||||
/* Set SendDmaLenGen */
|
||||
tmp64 = dd->ipath_sdma_descq_cnt;
|
||||
tmp64 |= 1<<18; /* enable generation checking */
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
|
||||
/* Set SendDmaTail */
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
|
||||
dd->ipath_sdma_descq_tail);
|
||||
/* Set SendDmaHeadAddr */
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
|
||||
dd->ipath_sdma_head_phys);
|
||||
|
||||
/* Reserve all the former "kernel" piobufs */
|
||||
n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - dd->ipath_pioreserved;
|
||||
for (i = dd->ipath_lastport_piobuf; i < n; ++i) {
|
||||
unsigned word = i / 64;
|
||||
unsigned bit = i & 63;
|
||||
BUG_ON(word >= 3);
|
||||
senddmabufmask[word] |= 1ULL << bit;
|
||||
}
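/* illustrative note, not in the patch: e.g. piobuf index 70 sets
 * senddmabufmask[1] bit 6 (70 / 64 == 1, 70 & 63 == 6); the three
 * 64-bit words form one 192-bit mask of buffers handed to send DMA */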
|
||||
ipath_chg_pioavailkernel(dd, dd->ipath_lastport_piobuf,
|
||||
n - dd->ipath_lastport_piobuf, 0);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
|
||||
senddmabufmask[0]);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
|
||||
senddmabufmask[1]);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
|
||||
senddmabufmask[2]);
|
||||
|
||||
INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
|
||||
INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
|
||||
|
||||
tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
|
||||
(unsigned long) dd);
|
||||
tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
|
||||
(unsigned long) dd);
|
||||
|
||||
/*
|
||||
* No use to turn on SDMA here, as link is probably not ACTIVE
|
||||
* Just mark it RUNNING and enable the interrupt, and let the
|
||||
* ipath_restart_sdma() on link transition to ACTIVE actually
|
||||
* enable it.
|
||||
*/
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
__set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
void teardown_sdma(struct ipath_devdata *dd)
|
||||
{
|
||||
struct ipath_sdma_txreq *txp, *txpnext;
|
||||
unsigned long flags;
|
||||
dma_addr_t sdma_head_phys = 0;
|
||||
dma_addr_t sdma_descq_phys = 0;
|
||||
void *sdma_descq = NULL;
|
||||
void *sdma_head_dma = NULL;
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
__clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
|
||||
__set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
|
||||
__set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
tasklet_kill(&dd->ipath_sdma_abort_task);
|
||||
tasklet_kill(&dd->ipath_sdma_notify_task);
|
||||
|
||||
/* turn off sdma */
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
|
||||
dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
/* dequeue all "sent" requests */
|
||||
list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
|
||||
list) {
|
||||
txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
|
||||
if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
|
||||
vl15_watchdog_deq(dd);
|
||||
list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
|
||||
}
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
sdma_notify_taskbody(dd);
|
||||
|
||||
del_timer_sync(&dd->ipath_sdma_vl15_timer);
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
dd->ipath_sdma_abort_jiffies = 0;
|
||||
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
|
||||
|
||||
if (dd->ipath_sdma_head_dma) {
|
||||
sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
|
||||
sdma_head_phys = dd->ipath_sdma_head_phys;
|
||||
dd->ipath_sdma_head_dma = NULL;
|
||||
dd->ipath_sdma_head_phys = 0;
|
||||
}
|
||||
|
||||
if (dd->ipath_sdma_descq) {
|
||||
sdma_descq = dd->ipath_sdma_descq;
|
||||
sdma_descq_phys = dd->ipath_sdma_descq_phys;
|
||||
dd->ipath_sdma_descq = NULL;
|
||||
dd->ipath_sdma_descq_phys = 0;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
if (sdma_head_dma)
|
||||
dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
|
||||
sdma_head_dma, sdma_head_phys);
|
||||
|
||||
if (sdma_descq)
|
||||
dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
|
||||
sdma_descq, sdma_descq_phys);
|
||||
}
|
||||
|
||||
/*
|
||||
* [Re]start SDMA, if we use it, and it's not already OK.
|
||||
* This is called on transition to link ACTIVE, either the first or
|
||||
* subsequent times.
|
||||
*/
|
||||
void ipath_restart_sdma(struct ipath_devdata *dd)
|
||||
{
|
||||
unsigned long flags;
|
||||
int needed = 1;
|
||||
|
||||
if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
|
||||
goto bail;
|
||||
|
||||
/*
|
||||
* First, make sure we should, which is to say,
|
||||
* check that we are "RUNNING" (not in teardown)
|
||||
* and not "SHUTDOWN"
|
||||
*/
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
|
||||
|| test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
|
||||
needed = 0;
|
||||
else {
|
||||
__clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
|
||||
__clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
|
||||
__clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
|
||||
}
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
if (!needed) {
|
||||
ipath_dbg("invalid attempt to restart SDMA, status 0x%016llx\n",
|
||||
dd->ipath_sdma_status);
|
||||
goto bail;
|
||||
}
|
||||
spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
|
||||
/*
|
||||
* First clear, just to be safe. Enable is only done
|
||||
* in chip on 0->1 transition
|
||||
*/
|
||||
dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
|
||||
ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
|
||||
spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
|
||||
|
||||
bail:
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void make_sdma_desc(struct ipath_devdata *dd,
u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
{
WARN_ON(addr & 3);
/* SDmaPhyAddr[47:32] */
sdmadesc[1] = addr >> 32;
/* SDmaPhyAddr[31:0] */
sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
/* SDmaGeneration[1:0] */
sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
/* SDmaDwordCount[10:0] */
sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
/* SDmaBufOffset[12:2] */
sdmadesc[0] |= dwoffset & 0x7ffULL;
}

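Aside, not part of the patch: a standalone sketch of the descriptor layout built above, packing the fields the same way make_sdma_desc() does and decoding them back the way unmap_desc() does earlier in this file:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x12345678ULL;		/* dword-aligned DMA address */
	uint64_t gen = 2, dwlen = 0x40, dwoffset = 0x10;
	uint64_t desc[2];

	/* pack, mirroring make_sdma_desc() */
	desc[1] = addr >> 32;				/* SDmaPhyAddr[47:32] */
	desc[0] = (addr & 0xfffffffcULL) << 32;		/* SDmaPhyAddr[31:0] */
	desc[0] |= (gen & 3ULL) << 30;			/* SDmaGeneration[1:0] */
	desc[0] |= (dwlen & 0x7ffULL) << 16;		/* SDmaDwordCount[10:0] */
	desc[0] |= dwoffset & 0x7ffULL;			/* SDmaBufOffset[12:2] */

	/* decode, mirroring unmap_desc(): address, then length in bytes */
	uint64_t daddr = (desc[1] << 32) | (desc[0] >> 32);
	uint64_t dlen = (desc[0] >> 14) & (0x7ffULL << 2);

	assert(daddr == addr);
	assert(dlen == (dwlen << 2));
	printf("addr 0x%llx, len %llu bytes\n",
	       (unsigned long long)daddr, (unsigned long long)dlen);
	return 0;
}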
/*
|
||||
* This function queues one IB packet onto the send DMA queue per call.
|
||||
* The caller is responsible for checking:
|
||||
* 1) The number of send DMA descriptor entries is less than the size of
|
||||
* the descriptor queue.
|
||||
* 2) The IB SGE addresses and lengths are 32-bit aligned
|
||||
* (except possibly the last SGE's length)
|
||||
* 3) The SGE addresses are suitable for passing to dma_map_single().
|
||||
*/
|
||||
int ipath_sdma_verbs_send(struct ipath_devdata *dd,
|
||||
struct ipath_sge_state *ss, u32 dwords,
|
||||
struct ipath_verbs_txreq *tx)
|
||||
{
|
||||
|
||||
unsigned long flags;
|
||||
struct ipath_sge *sge;
|
||||
int ret = 0;
|
||||
u16 tail;
|
||||
__le64 *descqp;
|
||||
u64 sdmadesc[2];
|
||||
u32 dwoffset;
|
||||
dma_addr_t addr;
|
||||
|
||||
if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
|
||||
ipath_dbg("packet size %X > ibmax %X, fail\n",
|
||||
tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
|
||||
ret = -EMSGSIZE;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
retry:
|
||||
if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
|
||||
ret = -EBUSY;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
|
||||
if (ipath_sdma_make_progress(dd))
|
||||
goto retry;
|
||||
ret = -ENOBUFS;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
|
||||
tx->map_len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(addr)) {
|
||||
ret = -EIO;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
dwoffset = tx->map_len >> 2;
|
||||
make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
|
||||
|
||||
/* SDmaFirstDesc */
|
||||
sdmadesc[0] |= 1ULL << 12;
|
||||
if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
|
||||
sdmadesc[0] |= 1ULL << 14; /* SDmaUseLargeBuf */
|
||||
|
||||
/* write to the descq */
|
||||
tail = dd->ipath_sdma_descq_tail;
|
||||
descqp = &dd->ipath_sdma_descq[tail].qw[0];
|
||||
*descqp++ = cpu_to_le64(sdmadesc[0]);
|
||||
*descqp++ = cpu_to_le64(sdmadesc[1]);
|
||||
|
||||
if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
|
||||
tx->txreq.start_idx = tail;
|
||||
|
||||
/* increment the tail */
|
||||
if (++tail == dd->ipath_sdma_descq_cnt) {
|
||||
tail = 0;
|
||||
descqp = &dd->ipath_sdma_descq[0].qw[0];
|
||||
++dd->ipath_sdma_generation;
|
||||
}
|
||||
|
||||
sge = &ss->sge;
|
||||
while (dwords) {
|
||||
u32 dw;
|
||||
u32 len;
|
||||
|
||||
len = dwords << 2;
|
||||
if (len > sge->length)
|
||||
len = sge->length;
|
||||
if (len > sge->sge_length)
|
||||
len = sge->sge_length;
|
||||
BUG_ON(len == 0);
|
||||
dw = (len + 3) >> 2;
|
||||
addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
|
||||
DMA_TO_DEVICE);
|
||||
make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
|
||||
/* SDmaUseLargeBuf has to be set in every descriptor */
|
||||
if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
|
||||
sdmadesc[0] |= 1ULL << 14;
|
||||
/* write to the descq */
|
||||
*descqp++ = cpu_to_le64(sdmadesc[0]);
|
||||
*descqp++ = cpu_to_le64(sdmadesc[1]);
|
||||
|
||||
/* increment the tail */
|
||||
if (++tail == dd->ipath_sdma_descq_cnt) {
|
||||
tail = 0;
|
||||
descqp = &dd->ipath_sdma_descq[0].qw[0];
|
||||
++dd->ipath_sdma_generation;
|
||||
}
|
||||
sge->vaddr += len;
|
||||
sge->length -= len;
|
||||
sge->sge_length -= len;
|
||||
if (sge->sge_length == 0) {
|
||||
if (--ss->num_sge)
|
||||
*sge = *ss->sg_list++;
|
||||
} else if (sge->length == 0 && sge->mr != NULL) {
|
||||
if (++sge->n >= IPATH_SEGSZ) {
|
||||
if (++sge->m >= sge->mr->mapsz)
|
||||
break;
|
||||
sge->n = 0;
|
||||
}
|
||||
sge->vaddr =
|
||||
sge->mr->map[sge->m]->segs[sge->n].vaddr;
|
||||
sge->length =
|
||||
sge->mr->map[sge->m]->segs[sge->n].length;
|
||||
}
|
||||
|
||||
dwoffset += dw;
|
||||
dwords -= dw;
|
||||
}
|
||||
|
||||
if (!tail)
|
||||
descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
|
||||
descqp -= 2;
|
||||
/* SDmaLastDesc */
|
||||
descqp[0] |= __constant_cpu_to_le64(1ULL << 11);
|
||||
if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
|
||||
/* SDmaIntReq */
|
||||
descqp[0] |= __constant_cpu_to_le64(1ULL << 15);
|
||||
}
|
||||
|
||||
/* Commit writes to memory and advance the tail on the chip */
|
||||
wmb();
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
|
||||
|
||||
tx->txreq.next_descq_idx = tail;
|
||||
tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
|
||||
dd->ipath_sdma_descq_tail = tail;
|
||||
dd->ipath_sdma_descq_added += tx->txreq.sg_count;
|
||||
list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
|
||||
if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
|
||||
vl15_watchdog_enq(dd);
|
||||
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
fail:
|
||||
return ret;
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -245,7 +245,8 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
sizeof(offset_addr));
|
||||
if (ret)
|
||||
goto bail_free;
|
||||
udata->outbuf = (void __user *) offset_addr;
|
||||
udata->outbuf =
|
||||
(void __user *) (unsigned long) offset_addr;
|
||||
ret = ib_copy_to_udata(udata, &offset,
|
||||
sizeof(offset));
|
||||
if (ret)
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -136,6 +136,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
|
||||
struct ipath_portdata *pd = dd->ipath_pd[0];
|
||||
size_t blen = 0;
|
||||
char buf[128];
|
||||
u32 hdrqtail;
|
||||
|
||||
*buf = 0;
|
||||
if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
|
||||
@ -174,17 +175,18 @@ static void ipath_qcheck(struct ipath_devdata *dd)
|
||||
if (blen)
|
||||
ipath_dbg("%s\n", buf);
|
||||
|
||||
if (pd->port_head != (u32)
|
||||
le64_to_cpu(*dd->ipath_hdrqtailptr)) {
|
||||
hdrqtail = ipath_get_hdrqtail(pd);
|
||||
if (pd->port_head != hdrqtail) {
|
||||
if (dd->ipath_lastport0rcv_cnt ==
|
||||
ipath_stats.sps_port0pkts) {
|
||||
ipath_cdbg(PKT, "missing rcv interrupts? "
|
||||
"port0 hd=%llx tl=%x; port0pkts %llx\n",
|
||||
(unsigned long long)
|
||||
le64_to_cpu(*dd->ipath_hdrqtailptr),
|
||||
pd->port_head,
|
||||
"port0 hd=%x tl=%x; port0pkts %llx; write"
|
||||
" hd (w/intr)\n",
|
||||
pd->port_head, hdrqtail,
|
||||
(unsigned long long)
|
||||
ipath_stats.sps_port0pkts);
|
||||
ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
|
||||
dd->ipath_rhdrhead_intr_off, pd->port_port);
|
||||
}
|
||||
dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
|
||||
}
|
||||
@ -290,11 +292,11 @@ void ipath_get_faststats(unsigned long opaque)
|
||||
&& time_after(jiffies, dd->ipath_unmasktime)) {
|
||||
char ebuf[256];
|
||||
int iserr;
|
||||
iserr = ipath_decode_err(ebuf, sizeof ebuf,
|
||||
dd->ipath_maskederrs);
|
||||
iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
|
||||
dd->ipath_maskederrs);
|
||||
if (dd->ipath_maskederrs &
|
||||
~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
|
||||
INFINIPATH_E_PKTERRS ))
|
||||
~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
|
||||
INFINIPATH_E_PKTERRS))
|
||||
ipath_dev_err(dd, "Re-enabling masked errors "
|
||||
"(%s)\n", ebuf);
|
||||
else {
|
||||
@ -306,17 +308,18 @@ void ipath_get_faststats(unsigned long opaque)
|
||||
* level.
|
||||
*/
|
||||
if (iserr)
|
||||
ipath_dbg("Re-enabling queue full errors (%s)\n",
|
||||
ebuf);
|
||||
ipath_dbg(
|
||||
"Re-enabling queue full errors (%s)\n",
|
||||
ebuf);
|
||||
else
|
||||
ipath_cdbg(ERRPKT, "Re-enabling packet"
|
||||
" problem interrupt (%s)\n", ebuf);
|
||||
" problem interrupt (%s)\n", ebuf);
|
||||
}
|
||||
|
||||
/* re-enable masked errors */
|
||||
dd->ipath_errormask |= dd->ipath_maskederrs;
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
|
||||
dd->ipath_errormask);
|
||||
dd->ipath_errormask);
|
||||
dd->ipath_maskederrs = 0;
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -34,6 +34,7 @@
|
||||
#include <linux/ctype.h>
|
||||
|
||||
#include "ipath_kernel.h"
|
||||
#include "ipath_verbs.h"
|
||||
#include "ipath_common.h"
|
||||
|
||||
/**
|
||||
@ -163,6 +164,15 @@ static ssize_t show_boardversion(struct device *dev,
|
||||
return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
|
||||
}
|
||||
|
||||
static ssize_t show_localbus_info(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct ipath_devdata *dd = dev_get_drvdata(dev);
|
||||
/* The string printed here is already newline-terminated. */
|
||||
return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
|
||||
}
|
||||
|
||||
static ssize_t show_lmc(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
@ -311,6 +321,8 @@ static ssize_t store_guid(struct device *dev,
|
||||
|
||||
dd->ipath_guid = new_guid;
|
||||
dd->ipath_nguid = 1;
|
||||
if (dd->verbs_dev)
|
||||
dd->verbs_dev->ibdev.node_guid = new_guid;
|
||||
|
||||
ret = strlen(buf);
|
||||
goto bail;
|
||||
@ -919,21 +931,21 @@ static ssize_t store_rx_polinv_enb(struct device *dev,
|
||||
u16 val;
|
||||
|
||||
ret = ipath_parse_ushort(buf, &val);
|
||||
if (ret < 0 || val > 1)
|
||||
goto invalid;
|
||||
|
||||
r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
|
||||
if (r < 0) {
|
||||
ret = r;
|
||||
if (ret >= 0 && val > 1) {
|
||||
ipath_dev_err(dd,
|
||||
"attempt to set invalid Rx Polarity (enable)\n");
|
||||
ret = -EINVAL;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
goto bail;
|
||||
invalid:
|
||||
ipath_dev_err(dd, "attempt to set invalid Rx Polarity (enable)\n");
|
||||
r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
|
||||
if (r < 0)
|
||||
ret = r;
|
||||
|
||||
bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get/Set RX lane-reversal enable. 0=no, 1=yes.
|
||||
*/
|
||||
@ -988,6 +1000,75 @@ static struct attribute_group driver_attr_group = {
|
||||
.attrs = driver_attributes
|
||||
};
|
||||
|
||||
static ssize_t store_tempsense(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
struct ipath_devdata *dd = dev_get_drvdata(dev);
|
||||
int ret, stat;
|
||||
u16 val;
|
||||
|
||||
ret = ipath_parse_ushort(buf, &val);
|
||||
if (ret <= 0) {
|
||||
ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
|
||||
goto bail;
|
||||
}
|
||||
/* If anything but the highest limit, enable T_CRIT_A "interrupt" */
|
||||
stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
|
||||
if (stat) {
|
||||
ipath_dev_err(dd, "Unable to set tempsense config\n");
|
||||
ret = -1;
|
||||
goto bail;
|
||||
}
|
||||
stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
|
||||
if (stat) {
|
||||
ipath_dev_err(dd, "Unable to set local Tcrit\n");
|
||||
ret = -1;
|
||||
goto bail;
|
||||
}
|
||||
stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
|
||||
if (stat) {
|
||||
ipath_dev_err(dd, "Unable to set remote Tcrit\n");
|
||||
ret = -1;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* dump tempsense regs. in decimal, to ease shell-scripts.
|
||||
*/
|
||||
static ssize_t show_tempsense(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct ipath_devdata *dd = dev_get_drvdata(dev);
|
||||
int ret;
|
||||
int idx;
|
||||
u8 regvals[8];
|
||||
|
||||
ret = -ENXIO;
|
||||
for (idx = 0; idx < 8; ++idx) {
|
||||
if (idx == 6)
|
||||
continue;
|
||||
ret = ipath_tempsense_read(dd, idx);
|
||||
if (ret < 0)
|
||||
break;
|
||||
regvals[idx] = ret;
|
||||
}
|
||||
if (idx == 8)
|
||||
ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
|
||||
*(signed char *)(regvals),
|
||||
*(signed char *)(regvals + 1),
|
||||
regvals[2], regvals[3],
|
||||
*(signed char *)(regvals + 5),
|
||||
*(signed char *)(regvals + 7));
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct attribute_group *ipath_driver_attr_groups[] = {
|
||||
&driver_attr_group,
|
||||
NULL,
|
||||
@ -1011,10 +1092,13 @@ static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
|
||||
static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
|
||||
static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
|
||||
static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
|
||||
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
|
||||
static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
|
||||
show_jint_max_packets, store_jint_max_packets);
|
||||
static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
|
||||
show_jint_idle_ticks, store_jint_idle_ticks);
|
||||
static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
|
||||
show_tempsense, store_tempsense);
|
||||
|
||||
static struct attribute *dev_attributes[] = {
|
||||
&dev_attr_guid.attr,
|
||||
@ -1034,6 +1118,8 @@ static struct attribute *dev_attributes[] = {
|
||||
&dev_attr_rx_pol_inv.attr,
|
||||
&dev_attr_led_override.attr,
|
||||
&dev_attr_logged_errors.attr,
|
||||
&dev_attr_tempsense.attr,
|
||||
&dev_attr_localbus_info.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@ -94,7 +94,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
|
||||
qp->s_state =
|
||||
OP(SEND_ONLY_WITH_IMMEDIATE);
|
||||
/* Immediate data comes after the BTH */
|
||||
ohdr->u.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.imm_data = wqe->wr.ex.imm_data;
|
||||
hwords += 1;
|
||||
}
|
||||
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
|
||||
@ -123,7 +123,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
|
||||
qp->s_state =
|
||||
OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
|
||||
/* Immediate data comes after the RETH */
|
||||
ohdr->u.rc.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
|
||||
hwords += 1;
|
||||
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
|
||||
bth0 |= 1 << 23;
|
||||
@ -152,7 +152,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
|
||||
else {
|
||||
qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
|
||||
/* Immediate data comes after the BTH */
|
||||
ohdr->u.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.imm_data = wqe->wr.ex.imm_data;
|
||||
hwords += 1;
|
||||
}
|
||||
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
|
||||
@ -177,7 +177,7 @@ int ipath_make_uc_req(struct ipath_qp *qp)
|
||||
qp->s_state =
|
||||
OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
|
||||
/* Immediate data comes after the BTH */
|
||||
ohdr->u.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.imm_data = wqe->wr.ex.imm_data;
|
||||
hwords += 1;
|
||||
if (wqe->wr.send_flags & IB_SEND_SOLICITED)
|
||||
bth0 |= 1 << 23;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -95,7 +95,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
|
||||
|
||||
if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
|
||||
wc.wc_flags = IB_WC_WITH_IMM;
|
||||
wc.imm_data = swqe->wr.imm_data;
|
||||
wc.imm_data = swqe->wr.ex.imm_data;
|
||||
} else {
|
||||
wc.wc_flags = 0;
|
||||
wc.imm_data = 0;
|
||||
@ -303,6 +303,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
|
||||
qp->s_hdrwords = 7;
|
||||
qp->s_cur_size = wqe->length;
|
||||
qp->s_cur_sge = &qp->s_sge;
|
||||
qp->s_dmult = ah_attr->static_rate;
|
||||
qp->s_wqe = wqe;
|
||||
qp->s_sge.sge = wqe->sg_list[0];
|
||||
qp->s_sge.sg_list = wqe->sg_list + 1;
|
||||
@ -326,7 +327,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
|
||||
}
|
||||
if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
|
||||
qp->s_hdrwords++;
|
||||
ohdr->u.ud.imm_data = wqe->wr.imm_data;
|
||||
ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
|
||||
bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
|
||||
} else
|
||||
bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
|
||||
|
  879 drivers/infiniband/hw/ipath/ipath_user_sdma.c (new file)
@ -0,0 +1,879 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/uio.h>
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "ipath_kernel.h"
|
||||
#include "ipath_user_sdma.h"
|
||||
|
||||
/* minimum size of header */
|
||||
#define IPATH_USER_SDMA_MIN_HEADER_LENGTH 64
|
||||
/* expected size of headers (for dma_pool) */
|
||||
#define IPATH_USER_SDMA_EXP_HEADER_LENGTH 64
|
||||
/* length mask in PBC (lower 11 bits) */
|
||||
#define IPATH_PBC_LENGTH_MASK ((1 << 11) - 1)
|
||||
|
||||
struct ipath_user_sdma_pkt {
|
||||
u8 naddr; /* dimension of addr (1..3) ... */
|
||||
u32 counter; /* sdma pkts queued counter for this entry */
|
||||
u64 added; /* global descq number of entries */
|
||||
|
||||
struct {
|
||||
u32 offset; /* offset for kvaddr, addr */
|
||||
u32 length; /* length in page */
|
||||
u8 put_page; /* should we put_page? */
|
||||
u8 dma_mapped; /* is page dma_mapped? */
|
||||
struct page *page; /* may be NULL (coherent mem) */
|
||||
void *kvaddr; /* FIXME: only for pio hack */
|
||||
dma_addr_t addr;
|
||||
} addr[4]; /* max pages, any more and we coalesce */
|
||||
struct list_head list; /* list element */
|
||||
};
|
||||
|
||||
struct ipath_user_sdma_queue {
|
||||
/*
|
||||
* pkts sent to dma engine are queued on this
|
||||
* list head. the type of the elements of this
|
||||
* list are struct ipath_user_sdma_pkt...
|
||||
*/
|
||||
struct list_head sent;
|
||||
|
||||
/* headers with expected length are allocated from here... */
|
||||
char header_cache_name[64];
|
||||
struct dma_pool *header_cache;
|
||||
|
||||
/* packets are allocated from the slab cache... */
|
||||
char pkt_slab_name[64];
|
||||
struct kmem_cache *pkt_slab;
|
||||
|
||||
/* as packets go on the queued queue, they are counted... */
|
||||
u32 counter;
|
||||
u32 sent_counter;
|
||||
|
||||
/* dma page table */
|
||||
struct rb_root dma_pages_root;
|
||||
|
||||
/* protect everything above... */
|
||||
struct mutex lock;
|
||||
};
|
||||
|
||||
struct ipath_user_sdma_queue *
|
||||
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
|
||||
{
|
||||
struct ipath_user_sdma_queue *pq =
|
||||
kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
|
||||
|
||||
if (!pq)
|
||||
goto done;
|
||||
|
||||
pq->counter = 0;
|
||||
pq->sent_counter = 0;
|
||||
INIT_LIST_HEAD(&pq->sent);
|
||||
|
||||
mutex_init(&pq->lock);
|
||||
|
||||
snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
|
||||
"ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
|
||||
pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
|
||||
sizeof(struct ipath_user_sdma_pkt),
|
||||
0, 0, NULL);
|
||||
|
||||
if (!pq->pkt_slab)
|
||||
goto err_kfree;
|
||||
|
||||
snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
|
||||
"ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
|
||||
pq->header_cache = dma_pool_create(pq->header_cache_name,
|
||||
dev,
|
||||
IPATH_USER_SDMA_EXP_HEADER_LENGTH,
|
||||
4, 0);
|
||||
if (!pq->header_cache)
|
||||
goto err_slab;
|
||||
|
||||
pq->dma_pages_root = RB_ROOT;
|
||||
|
||||
goto done;
|
||||
|
||||
err_slab:
|
||||
kmem_cache_destroy(pq->pkt_slab);
|
||||
err_kfree:
|
||||
kfree(pq);
|
||||
pq = NULL;
|
||||
|
||||
done:
|
||||
return pq;
|
||||
}
|
||||
|
||||
static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
|
||||
int i, size_t offset, size_t len,
|
||||
int put_page, int dma_mapped,
|
||||
struct page *page,
|
||||
void *kvaddr, dma_addr_t dma_addr)
|
||||
{
|
||||
pkt->addr[i].offset = offset;
|
||||
pkt->addr[i].length = len;
|
||||
pkt->addr[i].put_page = put_page;
|
||||
pkt->addr[i].dma_mapped = dma_mapped;
|
||||
pkt->addr[i].page = page;
|
||||
pkt->addr[i].kvaddr = kvaddr;
|
||||
pkt->addr[i].addr = dma_addr;
|
||||
}
|
||||
|
||||
static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
|
||||
u32 counter, size_t offset,
|
||||
size_t len, int dma_mapped,
|
||||
struct page *page,
|
||||
void *kvaddr, dma_addr_t dma_addr)
|
||||
{
|
||||
pkt->naddr = 1;
|
||||
pkt->counter = counter;
|
||||
ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
|
||||
kvaddr, dma_addr);
|
||||
}
|
||||
|
||||
/* we've too many pages in the iovec, coalesce to a single page */
|
||||
static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_pkt *pkt,
|
||||
const struct iovec *iov,
|
||||
unsigned long niov) {
|
||||
int ret = 0;
|
||||
struct page *page = alloc_page(GFP_KERNEL);
|
||||
void *mpage_save;
|
||||
char *mpage;
|
||||
int i;
|
||||
int len = 0;
|
||||
dma_addr_t dma_addr;
|
||||
|
||||
if (!page) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
mpage = kmap(page);
|
||||
mpage_save = mpage;
|
||||
for (i = 0; i < niov; i++) {
|
||||
int cfur;
|
||||
|
||||
cfur = copy_from_user(mpage,
|
||||
iov[i].iov_base, iov[i].iov_len);
|
||||
if (cfur) {
|
||||
ret = -EFAULT;
|
||||
goto free_unmap;
|
||||
}
|
||||
|
||||
mpage += iov[i].iov_len;
|
||||
len += iov[i].iov_len;
|
||||
}
|
||||
|
||||
dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dma_addr)) {
|
||||
ret = -ENOMEM;
|
||||
goto free_unmap;
|
||||
}
|
||||
|
||||
ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
|
||||
dma_addr);
|
||||
pkt->naddr = 2;
|
||||
|
||||
goto done;
|
||||
|
||||
free_unmap:
|
||||
kunmap(page);
|
||||
__free_page(page);
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* how many pages in this iovec element? */
static int ipath_user_sdma_num_pages(const struct iovec *iov)
{
const unsigned long addr = (unsigned long) iov->iov_base;
const unsigned long len = iov->iov_len;
const unsigned long spage = addr & PAGE_MASK;
const unsigned long epage = (addr + len - 1) & PAGE_MASK;

return 1 + ((epage - spage) >> PAGE_SHIFT);
}

/* truncate length to page boundary */
static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
{
const unsigned long offset = addr & ~PAGE_MASK;

return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}

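Aside, not part of the patch: a standalone sketch exercising the same arithmetic as the two helpers above; an iovec that starts near the end of one page and spills into the next spans two pages, and its first fragment is truncated at the page boundary:

#include <assert.h>
#include <stdio.h>

#define PG_SIZE  4096UL
#define PG_MASK  (~(PG_SIZE - 1))
#define PG_SHIFT 12

static unsigned long num_pages(unsigned long addr, unsigned long len)
{
	unsigned long spage = addr & PG_MASK;
	unsigned long epage = (addr + len - 1) & PG_MASK;

	return 1 + ((epage - spage) >> PG_SHIFT);
}

static unsigned long first_frag_len(unsigned long addr, unsigned long len)
{
	unsigned long offset = addr & ~PG_MASK;

	return ((offset + len) > PG_SIZE) ? (PG_SIZE - offset) : len;
}

int main(void)
{
	unsigned long addr = 0x1000f00UL, len = 0x300UL;	/* crosses one page boundary */

	assert(num_pages(addr, len) == 2);
	assert(first_frag_len(addr, len) == 0x100);	/* bytes 0xf00..0xfff of page one */
	printf("pages %lu, first fragment %lu bytes\n",
	       num_pages(addr, len), first_frag_len(addr, len));
	return 0;
}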
static void ipath_user_sdma_free_pkt_frag(struct device *dev,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
struct ipath_user_sdma_pkt *pkt,
|
||||
int frag)
|
||||
{
|
||||
const int i = frag;
|
||||
|
||||
if (pkt->addr[i].page) {
|
||||
if (pkt->addr[i].dma_mapped)
|
||||
dma_unmap_page(dev,
|
||||
pkt->addr[i].addr,
|
||||
pkt->addr[i].length,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
if (pkt->addr[i].kvaddr)
|
||||
kunmap(pkt->addr[i].page);
|
||||
|
||||
if (pkt->addr[i].put_page)
|
||||
put_page(pkt->addr[i].page);
|
||||
else
|
||||
__free_page(pkt->addr[i].page);
|
||||
} else if (pkt->addr[i].kvaddr)
|
||||
/* free coherent mem from cache... */
|
||||
dma_pool_free(pq->header_cache,
|
||||
pkt->addr[i].kvaddr, pkt->addr[i].addr);
|
||||
}
|
||||
|
||||
/* return number of pages pinned... */
|
||||
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_pkt *pkt,
|
||||
unsigned long addr, int tlen, int npages)
|
||||
{
|
||||
struct page *pages[2];
|
||||
int j;
|
||||
int ret;
|
||||
|
||||
ret = get_user_pages(current, current->mm, addr,
|
||||
npages, 0, 1, pages, NULL);
|
||||
|
||||
if (ret != npages) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ret; i++)
|
||||
put_page(pages[i]);
|
||||
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
for (j = 0; j < npages; j++) {
|
||||
/* map the pages... */
|
||||
const int flen =
|
||||
ipath_user_sdma_page_length(addr, tlen);
|
||||
dma_addr_t dma_addr =
|
||||
dma_map_page(&dd->pcidev->dev,
|
||||
pages[j], 0, flen, DMA_TO_DEVICE);
|
||||
unsigned long fofs = addr & ~PAGE_MASK;
|
||||
|
||||
if (dma_mapping_error(dma_addr)) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
|
||||
pages[j], kmap(pages[j]),
|
||||
dma_addr);
|
||||
|
||||
pkt->naddr++;
|
||||
addr += flen;
|
||||
tlen -= flen;
|
||||
}
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
struct ipath_user_sdma_pkt *pkt,
|
||||
const struct iovec *iov,
|
||||
unsigned long niov)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned long idx;
|
||||
|
||||
for (idx = 0; idx < niov; idx++) {
|
||||
const int npages = ipath_user_sdma_num_pages(iov + idx);
|
||||
const unsigned long addr = (unsigned long) iov[idx].iov_base;
|
||||
|
||||
ret = ipath_user_sdma_pin_pages(dd, pkt,
|
||||
addr, iov[idx].iov_len,
|
||||
npages);
|
||||
if (ret < 0)
|
||||
goto free_pkt;
|
||||
}
|
||||
|
||||
goto done;
|
||||
|
||||
free_pkt:
|
||||
for (idx = 0; idx < pkt->naddr; idx++)
|
||||
ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
|
||||
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
struct ipath_user_sdma_pkt *pkt,
|
||||
const struct iovec *iov,
|
||||
unsigned long niov, int npages)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (npages >= ARRAY_SIZE(pkt->addr))
|
||||
ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
|
||||
else
|
||||
ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* free a packet list -- return counter value of last packet */
|
||||
static void ipath_user_sdma_free_pkt_list(struct device *dev,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
struct list_head *list)
|
||||
{
|
||||
struct ipath_user_sdma_pkt *pkt, *pkt_next;
|
||||
|
||||
list_for_each_entry_safe(pkt, pkt_next, list, list) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pkt->naddr; i++)
|
||||
ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
|
||||
|
||||
kmem_cache_free(pq->pkt_slab, pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* copy headers, coalesce etc -- pq->lock must be held
|
||||
*
|
||||
* we queue all the packets to list, returning the
|
||||
* number of bytes total. list must be empty initially,
|
||||
* as, if there is an error we clean it...
|
||||
*/
|
||||
static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
struct list_head *list,
|
||||
const struct iovec *iov,
|
||||
unsigned long niov,
|
||||
int maxpkts)
|
||||
{
|
||||
unsigned long idx = 0;
|
||||
int ret = 0;
|
||||
int npkts = 0;
|
||||
struct page *page = NULL;
|
||||
__le32 *pbc;
|
||||
dma_addr_t dma_addr;
|
||||
struct ipath_user_sdma_pkt *pkt = NULL;
|
||||
size_t len;
|
||||
size_t nw;
|
||||
u32 counter = pq->counter;
|
||||
int dma_mapped = 0;
|
||||
|
||||
while (idx < niov && npkts < maxpkts) {
|
||||
const unsigned long addr = (unsigned long) iov[idx].iov_base;
|
||||
const unsigned long idx_save = idx;
|
||||
unsigned pktnw;
|
||||
unsigned pktnwc;
|
||||
int nfrags = 0;
|
||||
int npages = 0;
|
||||
int cfur;
|
||||
|
||||
dma_mapped = 0;
|
||||
len = iov[idx].iov_len;
|
||||
nw = len >> 2;
|
||||
page = NULL;
|
||||
|
||||
pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
|
||||
if (!pkt) {
|
||||
ret = -ENOMEM;
|
||||
goto free_list;
|
||||
}
|
||||
|
||||
if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
|
||||
len > PAGE_SIZE || len & 3 || addr & 3) {
|
||||
ret = -EINVAL;
|
||||
goto free_pkt;
|
||||
}
|
||||
|
||||
if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
|
||||
pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
|
||||
&dma_addr);
|
||||
else
|
||||
pbc = NULL;
|
||||
|
||||
if (!pbc) {
|
||||
page = alloc_page(GFP_KERNEL);
|
||||
if (!page) {
|
||||
ret = -ENOMEM;
|
||||
goto free_pkt;
|
||||
}
|
||||
pbc = kmap(page);
|
||||
}
|
||||
|
||||
cfur = copy_from_user(pbc, iov[idx].iov_base, len);
|
||||
if (cfur) {
|
||||
ret = -EFAULT;
|
||||
goto free_pbc;
|
||||
}
|
||||
|
||||
/*
|
||||
* this assignment is a bit strange. it's because the
* pbc counts the number of 32 bit words in the full
|
||||
* packet _except_ the first word of the pbc itself...
|
||||
*/
|
||||
pktnwc = nw - 1;
|
||||
|
||||
/*
|
||||
* pktnw computation yields the number of 32 bit words
|
||||
* that the caller has indicated in the PBC. note that
|
||||
* this is one less than the total number of words that
|
||||
* goes to the send DMA engine as the first 32 bit word
|
||||
* of the PBC itself is not counted. Armed with this count,
|
||||
* we can verify that the packet is consistent with the
|
||||
* iovec lengths.
|
||||
*/
|
||||
pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
|
||||
if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
|
||||
ret = -EINVAL;
|
||||
goto free_pbc;
|
||||
}
|
||||
|
||||
|
||||
idx++;
|
||||
while (pktnwc < pktnw && idx < niov) {
|
||||
const size_t slen = iov[idx].iov_len;
|
||||
const unsigned long faddr =
|
||||
(unsigned long) iov[idx].iov_base;
|
||||
|
||||
if (slen & 3 || faddr & 3 || !slen ||
|
||||
slen > PAGE_SIZE) {
|
||||
ret = -EINVAL;
|
||||
goto free_pbc;
|
||||
}
|
||||
|
||||
npages++;
|
||||
if ((faddr & PAGE_MASK) !=
|
||||
((faddr + slen - 1) & PAGE_MASK))
|
||||
npages++;
|
||||
|
||||
pktnwc += slen >> 2;
|
||||
idx++;
|
||||
nfrags++;
|
||||
}
|
||||
|
||||
if (pktnwc != pktnw) {
|
||||
ret = -EINVAL;
|
||||
goto free_pbc;
|
||||
}
|
||||
|
||||
if (page) {
|
||||
dma_addr = dma_map_page(&dd->pcidev->dev,
|
||||
page, 0, len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dma_addr)) {
|
||||
ret = -ENOMEM;
|
||||
goto free_pbc;
|
||||
}
|
||||
|
||||
dma_mapped = 1;
|
||||
}
|
||||
|
||||
ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
|
||||
page, pbc, dma_addr);
|
||||
|
||||
if (nfrags) {
|
||||
ret = ipath_user_sdma_init_payload(dd, pq, pkt,
|
||||
iov + idx_save + 1,
|
||||
nfrags, npages);
|
||||
if (ret < 0)
|
||||
goto free_pbc_dma;
|
||||
}
|
||||
|
||||
counter++;
|
||||
npkts++;
|
||||
|
||||
list_add_tail(&pkt->list, list);
|
||||
}
|
||||
|
||||
ret = idx;
|
||||
goto done;
|
||||
|
||||
free_pbc_dma:
|
||||
if (dma_mapped)
|
||||
dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
|
||||
free_pbc:
|
||||
if (page) {
|
||||
kunmap(page);
|
||||
__free_page(page);
|
||||
} else
|
||||
dma_pool_free(pq->header_cache, pbc, dma_addr);
|
||||
free_pkt:
|
||||
kmem_cache_free(pq->pkt_slab, pkt);
|
||||
free_list:
|
||||
ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
|
||||
done:
|
||||
return ret;
|
||||
}
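For reference, the success path above actually returns the number of iovec entries consumed (ret = idx), with the queued packets appended to *list; every error path unwinds through the free_* labels, so the caller never sees a partially built list.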
|
||||
|
||||
static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
|
||||
u32 c)
|
||||
{
|
||||
pq->sent_counter = c;
|
||||
}
|
||||
|
||||
/* try to clean out queue -- needs pq->lock */
|
||||
static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq)
|
||||
{
|
||||
struct list_head free_list;
|
||||
struct ipath_user_sdma_pkt *pkt;
|
||||
struct ipath_user_sdma_pkt *pkt_prev;
|
||||
int ret = 0;
|
||||
|
||||
INIT_LIST_HEAD(&free_list);
|
||||
|
||||
list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
|
||||
s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
|
||||
|
||||
if (descd < 0)
|
||||
break;
|
||||
|
||||
list_move_tail(&pkt->list, &free_list);
|
||||
|
||||
/* one more packet cleaned */
|
||||
ret++;
|
||||
}
|
||||
|
||||
if (!list_empty(&free_list)) {
|
||||
u32 counter;
|
||||
|
||||
pkt = list_entry(free_list.prev,
|
||||
struct ipath_user_sdma_pkt, list);
|
||||
counter = pkt->counter;
|
||||
|
||||
ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
|
||||
ipath_user_sdma_set_complete_counter(pq, counter);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}

void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	ret = ipath_sdma_make_progress(dd);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}
|
||||
|
||||
/* we're in close, drain packets so that we can cleanup successfully... */
|
||||
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!pq)
|
||||
return;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
mutex_lock(&pq->lock);
|
||||
if (list_empty(&pq->sent)) {
|
||||
mutex_unlock(&pq->lock);
|
||||
break;
|
||||
}
|
||||
ipath_user_sdma_hwqueue_clean(dd);
|
||||
ipath_user_sdma_queue_clean(dd, pq);
|
||||
mutex_unlock(&pq->lock);
|
||||
msleep(10);
|
||||
}
|
||||
|
||||
if (!list_empty(&pq->sent)) {
|
||||
struct list_head free_list;
|
||||
|
||||
printk(KERN_INFO "drain: lists not empty: forcing!\n");
|
||||
INIT_LIST_HEAD(&free_list);
|
||||
mutex_lock(&pq->lock);
|
||||
list_splice_init(&pq->sent, &free_list);
|
||||
ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
|
||||
mutex_unlock(&pq->lock);
|
||||
}
|
||||
}
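Note that the loop above bounds the drain at roughly one second (100 passes of msleep(10), plus the cleanup calls) before giving up and forcibly freeing whatever is still on the sent list.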

static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
					   u64 addr, u64 dwlen, u64 dwoffset)
{
	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((dd->ipath_sdma_generation & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
{
	return descq | __constant_cpu_to_le64(1ULL << 12);
}

static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
{
					/* last */	/* dma head */
	return descq | __constant_cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 ipath_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}
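A worked illustration of the desc0 packing above, with values chosen only for the example:

/*
 * Example: addr = 0x1000, generation = 2, dwlen = 16, dwoffset = 0 gives
 *   ((0x1000 & 0xfffffffc) << 32) | (2 << 30) | (16 << 16) | 0
 *   = 0x0000100080100000, which cpu_to_le64() then stores little-endian.
 */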
|
||||
|
||||
static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_pkt *pkt, int idx,
|
||||
unsigned ofs, u16 tail)
|
||||
{
|
||||
const u64 addr = (u64) pkt->addr[idx].addr +
|
||||
(u64) pkt->addr[idx].offset;
|
||||
const u64 dwlen = (u64) pkt->addr[idx].length / 4;
|
||||
__le64 *descqp;
|
||||
__le64 descq0;
|
||||
|
||||
descqp = &dd->ipath_sdma_descq[tail].qw[0];
|
||||
|
||||
descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
|
||||
if (idx == 0)
|
||||
descq0 = ipath_sdma_make_first_desc0(descq0);
|
||||
if (idx == pkt->naddr - 1)
|
||||
descq0 = ipath_sdma_make_last_desc0(descq0);
|
||||
|
||||
descqp[0] = descq0;
|
||||
descqp[1] = ipath_sdma_make_desc1(addr);
|
||||
}
|
||||
|
||||
/* pq->lock must be held, get packets on the wire... */
|
||||
static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
struct list_head *pktlist)
|
||||
{
|
||||
int ret = 0;
|
||||
unsigned long flags;
|
||||
u16 tail;
|
||||
|
||||
if (list_empty(pktlist))
|
||||
return 0;
|
||||
|
||||
if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
|
||||
return -ECOMM;
|
||||
|
||||
spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
|
||||
ret = -ECOMM;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
tail = dd->ipath_sdma_descq_tail;
|
||||
while (!list_empty(pktlist)) {
|
||||
struct ipath_user_sdma_pkt *pkt =
|
||||
list_entry(pktlist->next, struct ipath_user_sdma_pkt,
|
||||
list);
|
||||
int i;
|
||||
unsigned ofs = 0;
|
||||
u16 dtail = tail;
|
||||
|
||||
if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
|
||||
goto unlock_check_tail;
|
||||
|
||||
for (i = 0; i < pkt->naddr; i++) {
|
||||
ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
|
||||
ofs += pkt->addr[i].length >> 2;
|
||||
|
||||
if (++tail == dd->ipath_sdma_descq_cnt) {
|
||||
tail = 0;
|
||||
++dd->ipath_sdma_generation;
|
||||
}
|
||||
}
|
||||
|
||||
if ((ofs<<2) > dd->ipath_ibmaxlen) {
|
||||
ipath_dbg("packet size %X > ibmax %X, fail\n",
|
||||
ofs<<2, dd->ipath_ibmaxlen);
|
||||
ret = -EMSGSIZE;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* if the packet is >= 2KB mtu equivalent, we have to use
|
||||
* the large buffers, and have to mark each descriptor as
|
||||
* part of a large buffer packet.
|
||||
*/
|
||||
if (ofs >= IPATH_SMALLBUF_DWORDS) {
|
||||
for (i = 0; i < pkt->naddr; i++) {
|
||||
dd->ipath_sdma_descq[dtail].qw[0] |=
|
||||
__constant_cpu_to_le64(1ULL << 14);
|
||||
if (++dtail == dd->ipath_sdma_descq_cnt)
|
||||
dtail = 0;
|
||||
}
|
||||
}
|
||||
|
||||
dd->ipath_sdma_descq_added += pkt->naddr;
|
||||
pkt->added = dd->ipath_sdma_descq_added;
|
||||
list_move_tail(&pkt->list, &pq->sent);
|
||||
ret++;
|
||||
}
|
||||
|
||||
unlock_check_tail:
|
||||
/* advance the tail on the chip if necessary */
|
||||
if (dd->ipath_sdma_descq_tail != tail) {
|
||||
wmb();
|
||||
ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
|
||||
dd->ipath_sdma_descq_tail = tail;
|
||||
}
|
||||
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ipath_user_sdma_writev(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
const struct iovec *iov,
|
||||
unsigned long dim)
|
||||
{
|
||||
int ret = 0;
|
||||
struct list_head list;
|
||||
int npkts = 0;
|
||||
|
||||
INIT_LIST_HEAD(&list);
|
||||
|
||||
mutex_lock(&pq->lock);
|
||||
|
||||
if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
|
||||
ipath_user_sdma_hwqueue_clean(dd);
|
||||
ipath_user_sdma_queue_clean(dd, pq);
|
||||
}
|
||||
|
||||
while (dim) {
|
||||
const int mxp = 8;
|
||||
|
||||
down_write(¤t->mm->mmap_sem);
|
||||
ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
|
||||
up_write(¤t->mm->mmap_sem);
|
||||
|
||||
if (ret <= 0)
|
||||
goto done_unlock;
|
||||
else {
|
||||
dim -= ret;
|
||||
iov += ret;
|
||||
}
|
||||
|
||||
/* force packets onto the sdma hw queue... */
|
||||
if (!list_empty(&list)) {
|
||||
/*
|
||||
* lazily clean hw queue. the 4 is a guess of about
|
||||
* how many sdma descriptors a packet will take (it
|
||||
* doesn't have to be perfect).
|
||||
*/
|
||||
if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
|
||||
ipath_user_sdma_hwqueue_clean(dd);
|
||||
ipath_user_sdma_queue_clean(dd, pq);
|
||||
}
|
||||
|
||||
ret = ipath_user_sdma_push_pkts(dd, pq, &list);
|
||||
if (ret < 0)
|
||||
goto done_unlock;
|
||||
else {
|
||||
npkts += ret;
|
||||
pq->counter += ret;
|
||||
|
||||
if (!list_empty(&list))
|
||||
goto done_unlock;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done_unlock:
|
||||
if (!list_empty(&list))
|
||||
ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
|
||||
mutex_unlock(&pq->lock);
|
||||
|
||||
return (ret < 0) ? ret : npkts;
|
||||
}
|
||||
|
||||
int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&pq->lock);
|
||||
ipath_user_sdma_hwqueue_clean(dd);
|
||||
ret = ipath_user_sdma_queue_clean(dd, pq);
|
||||
mutex_unlock(&pq->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
|
||||
{
|
||||
return pq->sent_counter;
|
||||
}
|
||||
|
||||
u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
|
||||
{
|
||||
return pq->counter;
|
||||
}
|
||||
|
 54	drivers/infiniband/hw/ipath/ipath_user_sdma.h	(new file)
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/device.h>
|
||||
|
||||
struct ipath_user_sdma_queue;
|
||||
|
||||
struct ipath_user_sdma_queue *
|
||||
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
|
||||
void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
|
||||
|
||||
int ipath_user_sdma_writev(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq,
|
||||
const struct iovec *iov,
|
||||
unsigned long dim);
|
||||
|
||||
int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq);
|
||||
|
||||
int ipath_user_sdma_pkt_sent(const struct ipath_user_sdma_queue *pq,
|
||||
u32 counter);
|
||||
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
|
||||
struct ipath_user_sdma_queue *pq);
|
||||
|
||||
u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
|
||||
u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -242,6 +242,93 @@ static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr)
|
||||
ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Count the number of DMA descriptors needed to send length bytes of data.
|
||||
* Don't modify the ipath_sge_state to get the count.
|
||||
* Return zero if any of the segments is not aligned.
|
||||
*/
|
||||
static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
|
||||
{
|
||||
struct ipath_sge *sg_list = ss->sg_list;
|
||||
struct ipath_sge sge = ss->sge;
|
||||
u8 num_sge = ss->num_sge;
|
||||
u32 ndesc = 1; /* count the header */
|
||||
|
||||
while (length) {
|
||||
u32 len = sge.length;
|
||||
|
||||
if (len > length)
|
||||
len = length;
|
||||
if (len > sge.sge_length)
|
||||
len = sge.sge_length;
|
||||
BUG_ON(len == 0);
|
||||
if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
|
||||
(len != length && (len & (sizeof(u32) - 1)))) {
|
||||
ndesc = 0;
|
||||
break;
|
||||
}
|
||||
ndesc++;
|
||||
sge.vaddr += len;
|
||||
sge.length -= len;
|
||||
sge.sge_length -= len;
|
||||
if (sge.sge_length == 0) {
|
||||
if (--num_sge)
|
||||
sge = *sg_list++;
|
||||
} else if (sge.length == 0 && sge.mr != NULL) {
|
||||
if (++sge.n >= IPATH_SEGSZ) {
|
||||
if (++sge.m >= sge.mr->mapsz)
|
||||
break;
|
||||
sge.n = 0;
|
||||
}
|
||||
sge.vaddr =
|
||||
sge.mr->map[sge.m]->segs[sge.n].vaddr;
|
||||
sge.length =
|
||||
sge.mr->map[sge.m]->segs[sge.n].length;
|
||||
}
|
||||
length -= len;
|
||||
}
|
||||
return ndesc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy from the SGEs to the data buffer.
|
||||
*/
|
||||
static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
|
||||
u32 length)
|
||||
{
|
||||
struct ipath_sge *sge = &ss->sge;
|
||||
|
||||
while (length) {
|
||||
u32 len = sge->length;
|
||||
|
||||
if (len > length)
|
||||
len = length;
|
||||
if (len > sge->sge_length)
|
||||
len = sge->sge_length;
|
||||
BUG_ON(len == 0);
|
||||
memcpy(data, sge->vaddr, len);
|
||||
sge->vaddr += len;
|
||||
sge->length -= len;
|
||||
sge->sge_length -= len;
|
||||
if (sge->sge_length == 0) {
|
||||
if (--ss->num_sge)
|
||||
*sge = *ss->sg_list++;
|
||||
} else if (sge->length == 0 && sge->mr != NULL) {
|
||||
if (++sge->n >= IPATH_SEGSZ) {
|
||||
if (++sge->m >= sge->mr->mapsz)
|
||||
break;
|
||||
sge->n = 0;
|
||||
}
|
||||
sge->vaddr =
|
||||
sge->mr->map[sge->m]->segs[sge->n].vaddr;
|
||||
sge->length =
|
||||
sge->mr->map[sge->m]->segs[sge->n].length;
|
||||
}
|
||||
data += len;
|
||||
length -= len;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ipath_post_one_send - post one RC, UC, or UD send work request
|
||||
* @qp: the QP to post on
|
||||
@ -866,27 +953,257 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
|
||||
__raw_writel(last, piobuf);
|
||||
}
|
||||
|
||||
static int ipath_verbs_send_pio(struct ipath_qp *qp, u32 *hdr, u32 hdrwords,
|
||||
/*
 * Convert IB rate to delay multiplier.
 */
unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 8;
	case IB_RATE_5_GBPS: return 4;
	case IB_RATE_10_GBPS: return 2;
	case IB_RATE_20_GBPS: return 1;
	default: return 0;
	}
}

/*
 * Convert delay multiplier to IB rate
 */
static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
{
	switch (mult) {
	case 8: return IB_RATE_2_5_GBPS;
	case 4: return IB_RATE_5_GBPS;
	case 2: return IB_RATE_10_GBPS;
	case 1: return IB_RATE_20_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
|
||||
|
||||
static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
|
||||
{
|
||||
struct ipath_verbs_txreq *tx = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->pending_lock, flags);
|
||||
if (!list_empty(&dev->txreq_free)) {
|
||||
struct list_head *l = dev->txreq_free.next;
|
||||
|
||||
list_del(l);
|
||||
tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->pending_lock, flags);
|
||||
return tx;
|
||||
}
|
||||
|
||||
static inline void put_txreq(struct ipath_ibdev *dev,
|
||||
struct ipath_verbs_txreq *tx)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->pending_lock, flags);
|
||||
list_add(&tx->txreq.list, &dev->txreq_free);
|
||||
spin_unlock_irqrestore(&dev->pending_lock, flags);
|
||||
}
|
||||
|
||||
static void sdma_complete(void *cookie, int status)
|
||||
{
|
||||
struct ipath_verbs_txreq *tx = cookie;
|
||||
struct ipath_qp *qp = tx->qp;
|
||||
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
|
||||
|
||||
/* Generate a completion queue entry if needed */
|
||||
if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) {
|
||||
enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
|
||||
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
|
||||
|
||||
ipath_send_complete(qp, tx->wqe, ibs);
|
||||
}
|
||||
|
||||
if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
|
||||
kfree(tx->txreq.map_addr);
|
||||
put_txreq(dev, tx);
|
||||
|
||||
if (atomic_dec_and_test(&qp->refcount))
|
||||
wake_up(&qp->wait);
|
||||
}
|
||||
|
||||
/*
 * Compute the number of clock cycles of delay before sending the next packet.
 * The multipliers reflect the number of clocks for the fastest rate so
 * one tick at 4xDDR is 8 ticks at 1xSDR.
 * If the destination port will take longer to receive a packet than
 * the outgoing link can send it, we need to delay sending the next packet
 * by the difference in time it takes the receiver to receive and the sender
 * to send this packet.
 * Note that this delay is always correct for UC and RC but not always
 * optimal for UD. For UD, the destination HCA can be different for each
 * packet, in which case, we could send packets to a different destination
 * while "waiting" for the delay. The overhead for doing this without
 * HW support is more than just paying the cost of delaying some packets
 * unnecessarily.
 */
static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
{
	return (rcv_mult > snd_mult) ?
		(plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}
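Worked numbers for the formula above (values picked only to illustrate it):

/*
 * Example: plen = 100 dwords sent at 4X DDR (snd_mult = 1) to a 1X SDR
 * receiver (rcv_mult = 8) gives (100 * (8 - 1) + 1) >> 1 = 350 ticks of
 * delay; rcv_mult <= snd_mult gives 0.
 */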
|
||||
|
||||
static int ipath_verbs_send_dma(struct ipath_qp *qp,
|
||||
struct ipath_ib_header *hdr, u32 hdrwords,
|
||||
struct ipath_sge_state *ss, u32 len,
|
||||
u32 plen, u32 dwords)
|
||||
{
|
||||
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
|
||||
struct ipath_devdata *dd = dev->dd;
|
||||
struct ipath_verbs_txreq *tx;
|
||||
u32 *piobuf;
|
||||
u32 control;
|
||||
u32 ndesc;
|
||||
int ret;
|
||||
|
||||
tx = qp->s_tx;
|
||||
if (tx) {
|
||||
qp->s_tx = NULL;
|
||||
/* resend previously constructed packet */
|
||||
ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
|
||||
if (ret)
|
||||
qp->s_tx = tx;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
tx = get_txreq(dev);
|
||||
if (!tx) {
|
||||
ret = -EBUSY;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the saved delay count we computed for the previous packet
|
||||
* and save the delay count for this packet to be used next time
|
||||
* we get here.
|
||||
*/
|
||||
control = qp->s_pkt_delay;
|
||||
qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
|
||||
|
||||
tx->qp = qp;
|
||||
atomic_inc(&qp->refcount);
|
||||
tx->wqe = qp->s_wqe;
|
||||
tx->txreq.callback = sdma_complete;
|
||||
tx->txreq.callback_cookie = tx;
|
||||
tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
|
||||
IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
|
||||
if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
|
||||
tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
|
||||
|
||||
/* VL15 packets bypass credit check */
|
||||
if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
|
||||
control |= 1ULL << 31;
|
||||
tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
|
||||
}
|
||||
|
||||
if (len) {
|
||||
/*
|
||||
* Don't try to DMA if it takes more descriptors than
|
||||
* the queue holds.
|
||||
*/
|
||||
ndesc = ipath_count_sge(ss, len);
|
||||
if (ndesc >= dd->ipath_sdma_descq_cnt)
|
||||
ndesc = 0;
|
||||
} else
|
||||
ndesc = 1;
|
||||
if (ndesc) {
|
||||
tx->hdr.pbc[0] = cpu_to_le32(plen);
|
||||
tx->hdr.pbc[1] = cpu_to_le32(control);
|
||||
memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
|
||||
tx->txreq.sg_count = ndesc;
|
||||
tx->map_len = (hdrwords + 2) << 2;
|
||||
tx->txreq.map_addr = &tx->hdr;
|
||||
ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
|
||||
if (ret) {
|
||||
/* save ss and length in dwords */
|
||||
tx->ss = ss;
|
||||
tx->len = dwords;
|
||||
qp->s_tx = tx;
|
||||
}
|
||||
goto bail;
|
||||
}
|
||||
|
||||
/* Allocate a buffer and copy the header and payload to it. */
|
||||
tx->map_len = (plen + 1) << 2;
|
||||
piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
|
||||
if (unlikely(piobuf == NULL)) {
|
||||
ret = -EBUSY;
|
||||
goto err_tx;
|
||||
}
|
||||
tx->txreq.map_addr = piobuf;
|
||||
tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
|
||||
tx->txreq.sg_count = 1;
|
||||
|
||||
*piobuf++ = (__force u32) cpu_to_le32(plen);
|
||||
*piobuf++ = (__force u32) cpu_to_le32(control);
|
||||
memcpy(piobuf, hdr, hdrwords << 2);
|
||||
ipath_copy_from_sge(piobuf + hdrwords, ss, len);
|
||||
|
||||
ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
|
||||
/*
|
||||
* If we couldn't queue the DMA request, save the info
|
||||
* and try again later rather than destroying the
|
||||
* buffer and undoing the side effects of the copy.
|
||||
*/
|
||||
if (ret) {
|
||||
tx->ss = NULL;
|
||||
tx->len = 0;
|
||||
qp->s_tx = tx;
|
||||
}
|
||||
dev->n_unaligned++;
|
||||
goto bail;
|
||||
|
||||
err_tx:
|
||||
if (atomic_dec_and_test(&qp->refcount))
|
||||
wake_up(&qp->wait);
|
||||
put_txreq(dev, tx);
|
||||
bail:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ipath_verbs_send_pio(struct ipath_qp *qp,
|
||||
struct ipath_ib_header *ibhdr, u32 hdrwords,
|
||||
struct ipath_sge_state *ss, u32 len,
|
||||
u32 plen, u32 dwords)
|
||||
{
|
||||
struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
|
||||
u32 *hdr = (u32 *) ibhdr;
|
||||
u32 __iomem *piobuf;
|
||||
unsigned flush_wc;
|
||||
u32 control;
|
||||
int ret;
|
||||
|
||||
piobuf = ipath_getpiobuf(dd, NULL);
|
||||
piobuf = ipath_getpiobuf(dd, plen, NULL);
|
||||
if (unlikely(piobuf == NULL)) {
|
||||
ret = -EBUSY;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
/*
|
||||
* Write len to control qword, no flags.
|
||||
* Get the saved delay count we computed for the previous packet
|
||||
* and save the delay count for this packet to be used next time
|
||||
* we get here.
|
||||
*/
|
||||
control = qp->s_pkt_delay;
|
||||
qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
|
||||
|
||||
/* VL15 packets bypass credit check */
|
||||
if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
|
||||
control |= 1ULL << 31;
|
||||
|
||||
/*
|
||||
* Write the length to the control qword plus any needed flags.
|
||||
* We have to flush after the PBC for correctness on some cpus
|
||||
* or WC buffer can be written out of order.
|
||||
*/
|
||||
writeq(plen, piobuf);
|
||||
writeq(((u64) control << 32) | plen, piobuf);
|
||||
piobuf += 2;
|
||||
|
||||
flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
|
||||
@ -961,15 +1278,25 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
|
||||
*/
|
||||
plen = hdrwords + dwords + 1;
|
||||
|
||||
/* Drop non-VL15 packets if we are not in the active state */
|
||||
if (!(dd->ipath_flags & IPATH_LINKACTIVE) &&
|
||||
qp->ibqp.qp_type != IB_QPT_SMI) {
|
||||
/*
|
||||
* VL15 packets (IB_QPT_SMI) will always use PIO, so we
|
||||
* can defer SDMA restart until link goes ACTIVE without
|
||||
* worrying about just how we got there.
|
||||
*/
|
||||
if (qp->ibqp.qp_type == IB_QPT_SMI)
|
||||
ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
|
||||
plen, dwords);
|
||||
/* All non-VL15 packets are dropped if link is not ACTIVE */
|
||||
else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) {
|
||||
if (qp->s_wqe)
|
||||
ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
|
||||
ret = 0;
|
||||
} else
|
||||
ret = ipath_verbs_send_pio(qp, (u32 *) hdr, hdrwords,
|
||||
ss, len, plen, dwords);
|
||||
} else if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
|
||||
ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
|
||||
plen, dwords);
|
||||
else
|
||||
ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
|
||||
plen, dwords);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1038,6 +1365,12 @@ int ipath_get_counters(struct ipath_devdata *dd,
|
||||
ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
|
||||
ipath_snap_cntr(dd, crp->cr_badformatcnt) +
|
||||
dd->ipath_rxfc_unsupvl_errs;
|
||||
if (crp->cr_rxotherlocalphyerrcnt)
|
||||
cntrs->port_rcv_errors +=
|
||||
ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
|
||||
if (crp->cr_rxvlerrcnt)
|
||||
cntrs->port_rcv_errors +=
|
||||
ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
|
||||
cntrs->port_rcv_remphys_errors =
|
||||
ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
|
||||
cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
|
||||
@ -1046,9 +1379,16 @@ int ipath_get_counters(struct ipath_devdata *dd,
|
||||
cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
|
||||
cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
|
||||
cntrs->local_link_integrity_errors =
|
||||
(dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
|
||||
dd->ipath_lli_errs : dd->ipath_lli_errors;
|
||||
cntrs->excessive_buffer_overrun_errors = dd->ipath_overrun_thresh_errs;
|
||||
crp->cr_locallinkintegrityerrcnt ?
|
||||
ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
|
||||
((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
|
||||
dd->ipath_lli_errs : dd->ipath_lli_errors);
|
||||
cntrs->excessive_buffer_overrun_errors =
|
||||
crp->cr_excessbufferovflcnt ?
|
||||
ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
|
||||
dd->ipath_overrun_thresh_errs;
|
||||
cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
|
||||
ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
|
||||
|
||||
ret = 0;
|
||||
|
||||
@ -1183,7 +1523,9 @@ static int ipath_query_port(struct ib_device *ibdev,
|
||||
props->sm_lid = dev->sm_lid;
|
||||
props->sm_sl = dev->sm_sl;
|
||||
ibcstat = dd->ipath_lastibcstat;
|
||||
props->state = ((ibcstat >> 4) & 0x3) + 1;
|
||||
/* map LinkState to IB portinfo values. */
|
||||
props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
|
||||
|
||||
/* See phys_state_show() */
|
||||
props->phys_state = /* MEA: assumes shift == 0 */
|
||||
ipath_cvt_physportstate[dd->ipath_lastibcstat &
|
||||
@ -1195,18 +1537,13 @@ static int ipath_query_port(struct ib_device *ibdev,
|
||||
props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
|
||||
dev->z_pkey_violations;
|
||||
props->qkey_viol_cntr = dev->qkey_violations;
|
||||
props->active_width = IB_WIDTH_4X;
|
||||
props->active_width = dd->ipath_link_width_active;
|
||||
/* See rate_show() */
|
||||
props->active_speed = 1; /* Regular 10Mbs speed. */
|
||||
props->active_speed = dd->ipath_link_speed_active;
|
||||
props->max_vl_num = 1; /* VLCap = VL0 */
|
||||
props->init_type_reply = 0;
|
||||
|
||||
/*
|
||||
* Note: the chip supports a maximum MTU of 4096, but the driver
|
||||
* hasn't implemented this feature yet, so set the maximum value
|
||||
* to 2048.
|
||||
*/
|
||||
props->max_mtu = IB_MTU_2048;
|
||||
props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
|
||||
switch (dd->ipath_ibmtu) {
|
||||
case 4096:
|
||||
mtu = IB_MTU_4096;
|
||||
@ -1399,6 +1736,7 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
|
||||
|
||||
/* ib_create_ah() will initialize ah->ibah. */
|
||||
ah->attr = *ah_attr;
|
||||
ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
|
||||
|
||||
ret = &ah->ibah;
|
||||
|
||||
@ -1432,6 +1770,7 @@ static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
|
||||
struct ipath_ah *ah = to_iah(ibah);
|
||||
|
||||
*ah_attr = ah->attr;
|
||||
ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1581,6 +1920,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
|
||||
struct ipath_verbs_counters cntrs;
|
||||
struct ipath_ibdev *idev;
|
||||
struct ib_device *dev;
|
||||
struct ipath_verbs_txreq *tx;
|
||||
unsigned i;
|
||||
int ret;
|
||||
|
||||
idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
|
||||
@ -1591,6 +1932,17 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
|
||||
|
||||
dev = &idev->ibdev;
|
||||
|
||||
if (dd->ipath_sdma_descq_cnt) {
|
||||
tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx,
|
||||
GFP_KERNEL);
|
||||
if (tx == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto err_tx;
|
||||
}
|
||||
} else
|
||||
tx = NULL;
|
||||
idev->txreq_bufs = tx;
|
||||
|
||||
/* Only need to initialize non-zero fields. */
|
||||
spin_lock_init(&idev->n_pds_lock);
|
||||
spin_lock_init(&idev->n_ahs_lock);
|
||||
@ -1631,15 +1983,17 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
|
||||
INIT_LIST_HEAD(&idev->pending[2]);
|
||||
INIT_LIST_HEAD(&idev->piowait);
|
||||
INIT_LIST_HEAD(&idev->rnrwait);
|
||||
INIT_LIST_HEAD(&idev->txreq_free);
|
||||
idev->pending_index = 0;
|
||||
idev->port_cap_flags =
|
||||
IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
|
||||
if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
|
||||
idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
|
||||
idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
|
||||
idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
|
||||
idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
|
||||
idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
|
||||
idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
|
||||
idev->link_width_enabled = 3; /* 1x or 4x */
|
||||
|
||||
/* Snapshot current HW counters to "clear" them. */
|
||||
ipath_get_counters(dd, &cntrs);
|
||||
@ -1661,6 +2015,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
|
||||
cntrs.excessive_buffer_overrun_errors;
|
||||
idev->z_vl15_dropped = cntrs.vl15_dropped;
|
||||
|
||||
for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
|
||||
list_add(&tx->txreq.list, &idev->txreq_free);
|
||||
|
||||
/*
|
||||
* The system image GUID is supposed to be the same for all
|
||||
* IB HCAs in a single system but since there can be other
|
||||
@ -1710,6 +2067,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
|
||||
dev->phys_port_cnt = 1;
|
||||
dev->num_comp_vectors = 1;
|
||||
dev->dma_device = &dd->pcidev->dev;
|
||||
dev->class_dev.dev = dev->dma_device;
|
||||
dev->query_device = ipath_query_device;
|
||||
dev->modify_device = ipath_modify_device;
|
||||
dev->query_port = ipath_query_port;
|
||||
@ -1774,6 +2132,8 @@ err_reg:
|
||||
err_lk:
|
||||
kfree(idev->qp_table.table);
|
||||
err_qp:
|
||||
kfree(idev->txreq_bufs);
|
||||
err_tx:
|
||||
ib_dealloc_device(dev);
|
||||
ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
|
||||
idev = NULL;
|
||||
@ -1808,6 +2168,7 @@ void ipath_unregister_ib_device(struct ipath_ibdev *dev)
|
||||
ipath_free_all_qps(&dev->qp_table);
|
||||
kfree(dev->qp_table.table);
|
||||
kfree(dev->lk_table.table);
|
||||
kfree(dev->txreq_bufs);
|
||||
ib_dealloc_device(ibdev);
|
||||
}
|
||||
|
||||
@ -1855,13 +2216,15 @@ static ssize_t show_stats(struct class_device *cdev, char *buf)
|
||||
"RC stalls %d\n"
|
||||
"piobuf wait %d\n"
|
||||
"no piobuf %d\n"
|
||||
"unaligned %d\n"
|
||||
"PKT drops %d\n"
|
||||
"WQE errs %d\n",
|
||||
dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
|
||||
dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
|
||||
dev->n_other_naks, dev->n_timeouts,
|
||||
dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait,
|
||||
dev->n_no_piobuf, dev->n_pkt_drops, dev->n_wqe_errs);
|
||||
dev->n_no_piobuf, dev->n_unaligned,
|
||||
dev->n_pkt_drops, dev->n_wqe_errs);
|
||||
for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
|
||||
const struct ipath_opcode_stats *si = &dev->opstats[i];
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
@ -138,6 +138,11 @@ struct ipath_ib_header {
|
||||
} u;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct ipath_pio_header {
|
||||
__le32 pbc[2];
|
||||
struct ipath_ib_header hdr;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/*
|
||||
* There is one struct ipath_mcast for each multicast GID.
|
||||
* All attached QPs are then stored as a list of
|
||||
@ -319,6 +324,7 @@ struct ipath_sge_state {
|
||||
struct ipath_sge *sg_list; /* next SGE to be used if any */
|
||||
struct ipath_sge sge; /* progress state for the current SGE */
|
||||
u8 num_sge;
|
||||
u8 static_rate;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -356,6 +362,7 @@ struct ipath_qp {
|
||||
struct tasklet_struct s_task;
|
||||
struct ipath_mmap_info *ip;
|
||||
struct ipath_sge_state *s_cur_sge;
|
||||
struct ipath_verbs_txreq *s_tx;
|
||||
struct ipath_sge_state s_sge; /* current send request data */
|
||||
struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
|
||||
struct ipath_sge_state s_ack_rdma_sge;
|
||||
@ -363,7 +370,8 @@ struct ipath_qp {
|
||||
struct ipath_sge_state r_sge; /* current receive data */
|
||||
spinlock_t s_lock;
|
||||
unsigned long s_busy;
|
||||
u32 s_hdrwords; /* size of s_hdr in 32 bit words */
|
||||
u16 s_pkt_delay;
|
||||
u16 s_hdrwords; /* size of s_hdr in 32 bit words */
|
||||
u32 s_cur_size; /* size of send packet in bytes */
|
||||
u32 s_len; /* total length of s_sge */
|
||||
u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
|
||||
@ -387,7 +395,6 @@ struct ipath_qp {
|
||||
u8 r_nak_state; /* non-zero if NAK is pending */
|
||||
u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
|
||||
u8 r_reuse_sge; /* for UC receive errors */
|
||||
u8 r_sge_inx; /* current index into sg_list */
|
||||
u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */
|
||||
u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
|
||||
u8 r_head_ack_queue; /* index into s_ack_queue[] */
|
||||
@ -403,6 +410,7 @@ struct ipath_qp {
|
||||
u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
|
||||
u8 s_tail_ack_queue; /* index into s_ack_queue[] */
|
||||
u8 s_flags;
|
||||
u8 s_dmult;
|
||||
u8 timeout; /* Timeout for this QP */
|
||||
enum ib_mtu path_mtu;
|
||||
u32 remote_qpn;
|
||||
@ -510,6 +518,8 @@ struct ipath_ibdev {
|
||||
struct ipath_lkey_table lk_table;
|
||||
struct list_head pending[3]; /* FIFO of QPs waiting for ACKs */
|
||||
struct list_head piowait; /* list for wait PIO buf */
|
||||
struct list_head txreq_free;
|
||||
void *txreq_bufs;
|
||||
/* list of QPs waiting for RNR timer */
|
||||
struct list_head rnrwait;
|
||||
spinlock_t pending_lock;
|
||||
@ -570,6 +580,7 @@ struct ipath_ibdev {
|
||||
u32 n_rdma_dup_busy;
|
||||
u32 n_piowait;
|
||||
u32 n_no_piobuf;
|
||||
u32 n_unaligned;
|
||||
u32 port_cap_flags;
|
||||
u32 pma_sample_start;
|
||||
u32 pma_sample_interval;
|
||||
@ -581,7 +592,6 @@ struct ipath_ibdev {
|
||||
u16 pending_index; /* which pending queue is active */
|
||||
u8 pma_sample_status;
|
||||
u8 subnet_timeout;
|
||||
u8 link_width_enabled;
|
||||
u8 vl_high_limit;
|
||||
struct ipath_opcode_stats opstats[128];
|
||||
};
|
||||
@ -602,6 +612,16 @@ struct ipath_verbs_counters {
|
||||
u32 vl15_dropped;
|
||||
};
|
||||
|
||||
struct ipath_verbs_txreq {
|
||||
struct ipath_qp *qp;
|
||||
struct ipath_swqe *wqe;
|
||||
u32 map_len;
|
||||
u32 len;
|
||||
struct ipath_sge_state *ss;
|
||||
struct ipath_pio_header hdr;
|
||||
struct ipath_sdma_txreq txreq;
|
||||
};
|
||||
|
||||
static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
|
||||
{
|
||||
return container_of(ibmr, struct ipath_mr, ibmr);
|
||||
@ -694,11 +714,11 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc);
|
||||
|
||||
void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
|
||||
|
||||
unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
|
||||
|
||||
int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
|
||||
u32 hdrwords, struct ipath_sge_state *ss, u32 len);
|
||||
|
||||
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
|
||||
|
||||
void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
|
||||
|
||||
void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
|
||||
|
@ -85,6 +85,82 @@ static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
|
||||
return get_sw_cqe(cq, cq->mcq.cons_index);
|
||||
}
|
||||
|
||||
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
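Consumers reach this through the ib_modify_cq() verb; a minimal sketch of a call (the count/period values are illustrative assumptions, and the period units are device specific):

/* Illustrative only: request at most one completion event per 16
 * completions or per 10 period units, whichever comes first.
 * Assumes <rdma/ib_verbs.h> for struct ib_cq and ib_modify_cq(). */
static int example_set_cq_moderation(struct ib_cq *cq)
{
	return ib_modify_cq(cq, 16, 10);
}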
|
||||
|
||||
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
|
||||
PAGE_SIZE * 2, &buf->buf);
|
||||
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
|
||||
&buf->mtt);
|
||||
if (err)
|
||||
goto err_buf;
|
||||
|
||||
err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
|
||||
if (err)
|
||||
goto err_mtt;
|
||||
|
||||
return 0;
|
||||
|
||||
err_mtt:
|
||||
mlx4_mtt_cleanup(dev->dev, &buf->mtt);
|
||||
|
||||
err_buf:
|
||||
mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
|
||||
&buf->buf);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
|
||||
{
|
||||
mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
|
||||
}
|
||||
|
||||
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
|
||||
struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
|
||||
u64 buf_addr, int cqe)
|
||||
{
|
||||
int err;
|
||||
|
||||
*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
|
||||
IB_ACCESS_LOCAL_WRITE);
|
||||
if (IS_ERR(*umem))
|
||||
return PTR_ERR(*umem);
|
||||
|
||||
err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
|
||||
ilog2((*umem)->page_size), &buf->mtt);
|
||||
if (err)
|
||||
goto err_buf;
|
||||
|
||||
err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
|
||||
if (err)
|
||||
goto err_mtt;
|
||||
|
||||
return 0;
|
||||
|
||||
err_mtt:
|
||||
mlx4_mtt_cleanup(dev->dev, &buf->mtt);
|
||||
|
||||
err_buf:
|
||||
ib_umem_release(*umem);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
|
||||
struct ib_ucontext *context,
|
||||
struct ib_udata *udata)
|
||||
@ -92,7 +168,6 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
|
||||
struct mlx4_ib_dev *dev = to_mdev(ibdev);
|
||||
struct mlx4_ib_cq *cq;
|
||||
struct mlx4_uar *uar;
|
||||
int buf_size;
|
||||
int err;
|
||||
|
||||
if (entries < 1 || entries > dev->dev->caps.max_cqes)
|
||||
@ -104,8 +179,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
|
||||
|
||||
entries = roundup_pow_of_two(entries + 1);
|
||||
cq->ibcq.cqe = entries - 1;
|
||||
buf_size = entries * sizeof (struct mlx4_cqe);
|
||||
mutex_init(&cq->resize_mutex);
|
||||
spin_lock_init(&cq->lock);
|
||||
cq->resize_buf = NULL;
|
||||
cq->resize_umem = NULL;
|
||||
|
||||
if (context) {
|
||||
struct mlx4_ib_create_cq ucmd;
|
||||
@ -115,21 +192,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
|
||||
goto err_cq;
|
||||
}
|
||||
|
||||
cq->umem = ib_umem_get(context, ucmd.buf_addr, buf_size,
|
||||
IB_ACCESS_LOCAL_WRITE);
|
||||
if (IS_ERR(cq->umem)) {
|
||||
err = PTR_ERR(cq->umem);
|
||||
err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
|
||||
ucmd.buf_addr, entries);
|
||||
if (err)
|
||||
goto err_cq;
|
||||
}
|
||||
|
||||
err = mlx4_mtt_init(dev->dev, ib_umem_page_count(cq->umem),
|
||||
ilog2(cq->umem->page_size), &cq->buf.mtt);
|
||||
if (err)
|
||||
goto err_buf;
|
||||
|
||||
err = mlx4_ib_umem_write_mtt(dev, &cq->buf.mtt, cq->umem);
|
||||
if (err)
|
||||
goto err_mtt;
|
||||
|
||||
err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
|
||||
&cq->db);
|
||||
@ -147,19 +213,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
|
||||
*cq->mcq.set_ci_db = 0;
|
||||
*cq->mcq.arm_db = 0;
|
||||
|
||||
if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &cq->buf.buf)) {
|
||||
err = -ENOMEM;
|
||||
err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
|
||||
if (err)
|
||||
goto err_db;
|
||||
}
|
||||
|
||||
err = mlx4_mtt_init(dev->dev, cq->buf.buf.npages, cq->buf.buf.page_shift,
|
||||
&cq->buf.mtt);
|
||||
if (err)
|
||||
goto err_buf;
|
||||
|
||||
err = mlx4_buf_write_mtt(dev->dev, &cq->buf.mtt, &cq->buf.buf);
|
||||
if (err)
|
||||
goto err_mtt;
|
||||
|
||||
uar = &dev->priv_uar;
|
||||
}
|
||||
@ -187,12 +243,10 @@ err_dbmap:
|
||||
err_mtt:
|
||||
mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);
|
||||
|
||||
err_buf:
|
||||
if (context)
|
||||
ib_umem_release(cq->umem);
|
||||
else
|
||||
mlx4_buf_free(dev->dev, entries * sizeof (struct mlx4_cqe),
|
||||
&cq->buf.buf);
|
||||
mlx4_ib_free_cq_buf(dev, &cq->buf, entries);
|
||||
|
||||
err_db:
|
||||
if (!context)
|
||||
@ -204,6 +258,170 @@ err_cq:
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
|
||||
int entries)
|
||||
{
|
||||
int err;
|
||||
|
||||
if (cq->resize_buf)
|
||||
return -EBUSY;
|
||||
|
||||
cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
|
||||
if (!cq->resize_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
|
||||
if (err) {
|
||||
kfree(cq->resize_buf);
|
||||
cq->resize_buf = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
cq->resize_buf->cqe = entries - 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
|
||||
int entries, struct ib_udata *udata)
|
||||
{
|
||||
struct mlx4_ib_resize_cq ucmd;
|
||||
int err;
|
||||
|
||||
if (cq->resize_umem)
|
||||
return -EBUSY;
|
||||
|
||||
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
|
||||
return -EFAULT;
|
||||
|
||||
cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
|
||||
if (!cq->resize_buf)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
|
||||
&cq->resize_umem, ucmd.buf_addr, entries);
|
||||
if (err) {
|
||||
kfree(cq->resize_buf);
|
||||
cq->resize_buf = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
cq->resize_buf->cqe = entries - 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
i = cq->mcq.cons_index;
|
||||
while (get_sw_cqe(cq, i & cq->ibcq.cqe))
|
||||
++i;
|
||||
|
||||
return i - cq->mcq.cons_index;
|
||||
}
|
||||
|
||||
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
|
||||
{
|
||||
struct mlx4_cqe *cqe;
|
||||
int i;
|
||||
|
||||
i = cq->mcq.cons_index;
|
||||
cqe = get_cqe(cq, i & cq->ibcq.cqe);
|
||||
while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
|
||||
memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
|
||||
(i + 1) & cq->resize_buf->cqe),
|
||||
get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
|
||||
cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
|
||||
}
|
||||
++cq->mcq.cons_index;
|
||||
}
|
||||
|
||||
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
|
||||
{
|
||||
struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
|
||||
struct mlx4_ib_cq *cq = to_mcq(ibcq);
|
||||
int outst_cqe;
|
||||
int err;
|
||||
|
||||
mutex_lock(&cq->resize_mutex);
|
||||
|
||||
if (entries < 1 || entries > dev->dev->caps.max_cqes) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
entries = roundup_pow_of_two(entries + 1);
|
||||
if (entries == ibcq->cqe + 1) {
|
||||
err = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (ibcq->uobject) {
|
||||
err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
|
||||
if (err)
|
||||
goto out;
|
||||
} else {
|
||||
/* Can't be smaller than the number of outstanding CQEs */
|
||||
outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
|
||||
if (entries < outst_cqe + 1) {
|
||||
err = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = mlx4_alloc_resize_buf(dev, cq, entries);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
|
||||
if (err)
|
||||
goto err_buf;
|
||||
|
||||
if (ibcq->uobject) {
|
||||
cq->buf = cq->resize_buf->buf;
|
||||
cq->ibcq.cqe = cq->resize_buf->cqe;
|
||||
ib_umem_release(cq->umem);
|
||||
cq->umem = cq->resize_umem;
|
||||
|
||||
kfree(cq->resize_buf);
|
||||
cq->resize_buf = NULL;
|
||||
cq->resize_umem = NULL;
|
||||
} else {
|
||||
spin_lock_irq(&cq->lock);
|
||||
if (cq->resize_buf) {
|
||||
mlx4_ib_cq_resize_copy_cqes(cq);
|
||||
mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
|
||||
cq->buf = cq->resize_buf->buf;
|
||||
cq->ibcq.cqe = cq->resize_buf->cqe;
|
||||
|
||||
kfree(cq->resize_buf);
|
||||
cq->resize_buf = NULL;
|
||||
}
|
||||
spin_unlock_irq(&cq->lock);
|
||||
}
|
||||
|
||||
goto out;
|
||||
|
||||
err_buf:
|
||||
if (!ibcq->uobject)
|
||||
mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
|
||||
cq->resize_buf->cqe);
|
||||
|
||||
kfree(cq->resize_buf);
|
||||
cq->resize_buf = NULL;
|
||||
|
||||
if (cq->resize_umem) {
|
||||
ib_umem_release(cq->resize_umem);
|
||||
cq->resize_umem = NULL;
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&cq->resize_mutex);
|
||||
return err;
|
||||
}
|
||||
|
||||
int mlx4_ib_destroy_cq(struct ib_cq *cq)
|
||||
{
|
||||
struct mlx4_ib_dev *dev = to_mdev(cq->device);
|
||||
@ -216,8 +434,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
|
||||
mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
|
||||
ib_umem_release(mcq->umem);
|
||||
} else {
|
||||
mlx4_buf_free(dev->dev, (cq->cqe + 1) * sizeof (struct mlx4_cqe),
|
||||
&mcq->buf.buf);
|
||||
mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
|
||||
mlx4_ib_db_free(dev, &mcq->db);
|
||||
}
|
||||
|
||||
@ -297,6 +514,20 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
|
||||
wc->vendor_err = cqe->vendor_err_syndrome;
|
||||
}
|
||||
|
||||
static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
|
||||
{
|
||||
return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
|
||||
MLX4_CQE_IPOIB_STATUS_IPV4F |
|
||||
MLX4_CQE_IPOIB_STATUS_IPV4OPT |
|
||||
MLX4_CQE_IPOIB_STATUS_IPV6 |
|
||||
MLX4_CQE_IPOIB_STATUS_IPOK)) ==
|
||||
cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
|
||||
MLX4_CQE_IPOIB_STATUS_IPOK)) &&
|
||||
(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
|
||||
MLX4_CQE_IPOIB_STATUS_TCP)) &&
|
||||
checksum == cpu_to_be16(0xffff);
|
||||
}
|
||||
|
||||
static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
|
||||
struct mlx4_ib_qp **cur_qp,
|
||||
struct ib_wc *wc)
|
||||
@ -310,6 +541,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
|
||||
u32 g_mlpath_rqpn;
|
||||
u16 wqe_ctr;
|
||||
|
||||
repoll:
|
||||
cqe = next_cqe_sw(cq);
|
||||
if (!cqe)
|
||||
return -EAGAIN;
|
||||
@ -332,6 +564,22 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Resize CQ in progress */
|
||||
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
|
||||
if (cq->resize_buf) {
|
||||
struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);
|
||||
|
||||
mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
|
||||
cq->buf = cq->resize_buf->buf;
|
||||
cq->ibcq.cqe = cq->resize_buf->cqe;
|
||||
|
||||
kfree(cq->resize_buf);
|
||||
cq->resize_buf = NULL;
|
||||
}
|
||||
|
||||
goto repoll;
|
||||
}
|
||||
|
||||
if (!*cur_qp ||
|
||||
(be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
|
||||
/*
|
||||
@ -406,6 +654,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
|
||||
case MLX4_OPCODE_BIND_MW:
|
||||
wc->opcode = IB_WC_BIND_MW;
|
||||
break;
|
||||
case MLX4_OPCODE_LSO:
|
||||
wc->opcode = IB_WC_LSO;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
|
||||
@ -434,6 +685,8 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
|
||||
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
|
||||
wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
|
||||
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
|
||||
wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
|
||||
cqe->checksum);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -165,7 +165,7 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad)
|
||||
event.device = ibdev;
|
||||
event.element.port_num = port_num;
|
||||
|
||||
if(pinfo->clientrereg_resv_subnetto & 0x80)
|
||||
if (pinfo->clientrereg_resv_subnetto & 0x80)
|
||||
event.event = IB_EVENT_CLIENT_REREGISTER;
|
||||
else
|
||||
event.event = IB_EVENT_LID_CHANGE;
|
||||
|
@ -44,8 +44,8 @@
|
||||
#include "user.h"
|
||||
|
||||
#define DRV_NAME "mlx4_ib"
|
||||
#define DRV_VERSION "0.01"
|
||||
#define DRV_RELDATE "May 1, 2006"
|
||||
#define DRV_VERSION "1.0"
|
||||
#define DRV_RELDATE "April 4, 2008"
|
||||
|
||||
MODULE_AUTHOR("Roland Dreier");
|
||||
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
|
||||
@ -99,6 +99,10 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
|
||||
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
|
||||
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
|
||||
props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
|
||||
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
|
||||
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
|
||||
if (dev->dev->caps.max_gso_sz)
|
||||
props->device_cap_flags |= IB_DEVICE_UD_TSO;
|
||||
|
||||
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
|
||||
0xffffff;
|
||||
@ -567,6 +571,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
|
||||
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
|
||||
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
|
||||
(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
|
||||
(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
|
||||
(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
|
||||
(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
|
||||
@ -605,6 +610,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
|
||||
ibdev->ib_dev.post_send = mlx4_ib_post_send;
|
||||
ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
|
||||
ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
|
||||
ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
|
||||
ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
|
||||
ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
|
||||
ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
|
||||
ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
|
||||
@ -675,18 +682,20 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
|
||||
}
|
||||
|
||||
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
|
||||
enum mlx4_dev_event event, int subtype,
|
||||
int port)
|
||||
enum mlx4_dev_event event, int port)
|
||||
{
|
||||
struct ib_event ibev;
|
||||
|
||||
switch (event) {
|
||||
case MLX4_EVENT_TYPE_PORT_CHANGE:
|
||||
ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
|
||||
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
|
||||
case MLX4_DEV_EVENT_PORT_UP:
|
||||
ibev.event = IB_EVENT_PORT_ACTIVE;
|
||||
break;
|
||||
|
||||
case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR:
|
||||
case MLX4_DEV_EVENT_PORT_DOWN:
|
||||
ibev.event = IB_EVENT_PORT_ERR;
|
||||
break;
|
||||
|
||||
case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
|
||||
ibev.event = IB_EVENT_DEVICE_FATAL;
|
||||
break;
|
||||
|
||||
|
@ -78,13 +78,21 @@ struct mlx4_ib_cq_buf {
struct mlx4_mtt mtt;
};

struct mlx4_ib_cq_resize {
struct mlx4_ib_cq_buf buf;
int cqe;
};

struct mlx4_ib_cq {
struct ib_cq ibcq;
struct mlx4_cq mcq;
struct mlx4_ib_cq_buf buf;
struct mlx4_ib_cq_resize *resize_buf;
struct mlx4_ib_db db;
spinlock_t lock;
struct mutex resize_mutex;
struct ib_umem *umem;
struct ib_umem *resize_umem;
};

struct mlx4_ib_mr {
@ -110,6 +118,10 @@ struct mlx4_ib_wq {
unsigned tail;
};

enum mlx4_ib_qp_flags {
MLX4_IB_QP_LSO = 1 << 0
};

struct mlx4_ib_qp {
struct ib_qp ibqp;
struct mlx4_qp mqp;
@ -129,6 +141,7 @@ struct mlx4_ib_qp {
struct mlx4_mtt mtt;
int buf_size;
struct mutex mutex;
u32 flags;
u8 port;
u8 alt_port;
u8 atomic_rd_en;
@ -249,6 +262,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);

int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
struct ib_ucontext *context,
struct ib_udata *udata);

@ -71,6 +71,7 @@ enum {

static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = __constant_cpu_to_be32(MLX4_OPCODE_SEND),
[IB_WR_LSO] = __constant_cpu_to_be32(MLX4_OPCODE_LSO),
[IB_WR_SEND_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
[IB_WR_RDMA_WRITE] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
[IB_WR_RDMA_WRITE_WITH_IMM] = __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@ -122,7 +123,7 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
*/
static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
{
u32 *wqe;
__be32 *wqe;
int i;
int s;
int ind;
@ -143,7 +144,7 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
for (i = 64; i < s; i += 64) {
wqe = buf + i;
*wqe = 0xffffffff;
*wqe = cpu_to_be32(0xffffffff);
}
}
}
@ -242,7 +243,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
}
}

static int send_wqe_overhead(enum ib_qp_type type)
static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
{
/*
* UD WQEs must have a datagram segment.
@ -253,7 +254,8 @@ static int send_wqe_overhead(enum ib_qp_type type)
switch (type) {
case IB_QPT_UD:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_datagram_seg);
sizeof (struct mlx4_wqe_datagram_seg) +
((flags & MLX4_IB_QP_LSO) ? 64 : 0);
case IB_QPT_UC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
@ -315,7 +317,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
/* Sanity check SQ size before proceeding */
if (cap->max_send_wr > dev->dev->caps.max_wqes ||
cap->max_send_sge > dev->dev->caps.max_sq_sg ||
cap->max_inline_data + send_wqe_overhead(type) +
cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
return -EINVAL;

@ -329,7 +331,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,

s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
send_wqe_overhead(type);
send_wqe_overhead(type, qp->flags);

/*
* Hermon supports shrinking WQEs, such that a single work
@ -394,7 +396,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
}

qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
send_wqe_overhead(type, qp->flags)) /
sizeof (struct mlx4_wqe_data_seg);

qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@ -503,6 +506,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
} else {
qp->sq_no_prefetch = 0;

if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
qp->flags |= MLX4_IB_QP_LSO;

err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
if (err)
goto err;
@ -673,6 +679,13 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
struct mlx4_ib_qp *qp;
int err;

/* We only support LSO, and only for kernel UD QPs. */
if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
return ERR_PTR(-EINVAL);
if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
(pd->uobject || init_attr->qp_type != IB_QPT_UD))
return ERR_PTR(-EINVAL);

switch (init_attr->qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
@ -876,10 +889,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}

if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
ibqp->qp_type == IB_QPT_UD)
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
else if (attr_mask & IB_QP_PATH_MTU) {
else if (ibqp->qp_type == IB_QPT_UD) {
if (qp->flags & MLX4_IB_QP_LSO)
context->mtu_msgmax = (IB_MTU_4096 << 5) |
ilog2(dev->dev->caps.max_gso_sz);
else
context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
printk(KERN_ERR "path MTU (%u) is invalid\n",
attr->path_mtu);
@ -1182,7 +1200,7 @@ out:
}

static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
void *wqe)
void *wqe, unsigned *mlx_seg_len)
{
struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
struct mlx4_wqe_mlx_seg *mlx = wqe;
@ -1231,7 +1249,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
case IB_WR_SEND_WITH_IMM:
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
sqp->ud_header.immediate_present = 1;
sqp->ud_header.immediate_data = wr->imm_data;
sqp->ud_header.immediate_data = wr->ex.imm_data;
break;
default:
return -EINVAL;
@ -1303,7 +1321,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
i = 2;
}

return ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
*mlx_seg_len =
ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16);
return 0;
}

static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
@ -1396,6 +1416,34 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
dseg->addr = cpu_to_be64(sg->addr);
}

static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
{
unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);

/*
* This is a temporary limitation and will be removed in
* a forthcoming FW release:
*/
if (unlikely(halign > 64))
return -EINVAL;

if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
wr->num_sge > qp->sq.max_gs - (halign >> 4)))
return -EINVAL;

memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

/* make sure LSO header is written before overwriting stamping */
wmb();

wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
wr->wr.ud.hlen);

*lso_seg_len = halign;
return 0;
}

int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@ -1409,6 +1457,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned ind;
int uninitialized_var(stamp);
int uninitialized_var(size);
unsigned seglen;
int i;

spin_lock_irqsave(&qp->sq.lock, flags);
@ -1436,11 +1485,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
(wr->send_flags & IB_SEND_SOLICITED ?
cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
((wr->send_flags & IB_SEND_IP_CSUM) ?
cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) |
qp->sq_signal_bits;

if (wr->opcode == IB_WR_SEND_WITH_IMM ||
wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
ctrl->imm = wr->imm_data;
ctrl->imm = wr->ex.imm_data;
else
ctrl->imm = 0;

@ -1484,19 +1536,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
set_datagram_seg(wqe, wr);
wqe += sizeof (struct mlx4_wqe_datagram_seg);
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;

if (wr->opcode == IB_WR_LSO) {
err = build_lso_seg(wqe, wr, qp, &seglen);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
}
wqe += seglen;
size += seglen / 16;
}
break;

case IB_QPT_SMI:
case IB_QPT_GSI:
err = build_mlx_header(to_msqp(qp), wr, ctrl);
if (err < 0) {
err = build_mlx_header(to_msqp(qp), wr, ctrl, &seglen);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
}
wqe += err;
size += err / 16;

err = 0;
wqe += seglen;
size += seglen / 16;
break;

default:
@ -1725,7 +1785,9 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
struct mlx4_ib_qp *qp = to_mqp(ibqp);
struct mlx4_qp_context context;
int mlx4_state;
int err;
int err = 0;

mutex_lock(&qp->mutex);

if (qp->state == IB_QPS_RESET) {
qp_attr->qp_state = IB_QPS_RESET;
@ -1733,12 +1795,15 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
}

err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
if (err)
return -EINVAL;
if (err) {
err = -EINVAL;
goto out;
}

mlx4_state = be32_to_cpu(context.flags) >> 28;

qp_attr->qp_state = to_ib_qp_state(mlx4_state);
qp->state = to_ib_qp_state(mlx4_state);
qp_attr->qp_state = qp->state;
qp_attr->path_mtu = context.mtu_msgmax >> 5;
qp_attr->path_mig_state =
to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
@ -1797,6 +1862,8 @@ done:

qp_init_attr->cap = qp_attr->cap;

return 0;
out:
mutex_unlock(&qp->mutex);
return err;
}

@ -219,7 +219,7 @@ static void mthca_cmd_post_dbell(struct mthca_dev *dev,
__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
(1 << HCA_E_BIT) |
(op_modifier << HCR_OPMOD_SHIFT) |
op), ptr + offs[6]);
op), ptr + offs[6]);
wmb();
__raw_writel((__force u32) 0, ptr + offs[7]);
wmb();
@ -1339,6 +1339,10 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
/* Check port for UD address vector: */
*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(1);

/* Enable IPoIB checksumming if we can: */
if (dev->device_cap_flags & IB_DEVICE_UD_IP_CSUM)
*(inbox + INIT_HCA_FLAGS2_OFFSET / 4) |= cpu_to_be32(7 << 3);

/* We leave wqe_quota, responder_exu, etc as 0 (default) */

/* QPC/EEC/CQC/EQC/RDB attributes */

@ -103,6 +103,7 @@ enum {
DEV_LIM_FLAG_RAW_IPV6 = 1 << 4,
DEV_LIM_FLAG_RAW_ETHER = 1 << 5,
DEV_LIM_FLAG_SRQ = 1 << 6,
DEV_LIM_FLAG_IPOIB_CSUM = 1 << 7,
DEV_LIM_FLAG_BAD_PKEY_CNTR = 1 << 8,
DEV_LIM_FLAG_BAD_QKEY_CNTR = 1 << 9,
DEV_LIM_FLAG_MW = 1 << 16,

@ -119,7 +119,8 @@ struct mthca_cqe {
__be32 my_qpn;
__be32 my_ee;
__be32 rqpn;
__be16 sl_g_mlpath;
u8 sl_ipok;
u8 g_mlpath;
__be16 rlid;
__be32 imm_etype_pkey_eec;
__be32 byte_cnt;
@ -493,6 +494,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
int is_send;
int free_cqe = 1;
int err = 0;
u16 checksum;

cqe = next_cqe_sw(cq);
if (!cqe)
@ -635,12 +637,14 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
break;
}
entry->slid = be16_to_cpu(cqe->rlid);
entry->sl = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
entry->sl = cqe->sl_ipok >> 4;
entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff;
entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
entry->dlid_path_bits = cqe->g_mlpath & 0x7f;
entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
entry->wc_flags |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
IB_WC_GRH : 0;
entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff);
}

entry->status = IB_WC_SUCCESS;

@ -54,8 +54,8 @@

#define DRV_NAME "ib_mthca"
#define PFX DRV_NAME ": "
#define DRV_VERSION "0.08"
#define DRV_RELDATE "February 14, 2006"
#define DRV_VERSION "1.0"
#define DRV_RELDATE "April 4, 2008"

enum {
MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
@ -390,11 +390,11 @@ extern void __buggy_use_of_MTHCA_PUT(void);
do { \
void *__p = (char *) (source) + (offset); \
switch (sizeof (dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
case 8: (dest) = be64_to_cpup(__p); break; \
default: __buggy_use_of_MTHCA_GET(); \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
case 8: (dest) = be64_to_cpup(__p); break; \
default: __buggy_use_of_MTHCA_GET(); \
} \
} while (0)

@ -232,9 +232,9 @@ static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq)
static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
struct mthca_eqe* eqe;
struct mthca_eqe *eqe;
eqe = get_eqe(eq, eq->cons_index);
return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

@ -125,7 +125,7 @@ static void smp_snoop(struct ib_device *ibdev,
event.device = ibdev;
event.element.port_num = port_num;

if(pinfo->clientrereg_resv_subnetto & 0x80)
if (pinfo->clientrereg_resv_subnetto & 0x80)
event.event = IB_EVENT_CLIENT_REREGISTER;
else
event.event = IB_EVENT_LID_CHANGE;

@ -267,11 +267,16 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
mdev->mthca_flags |= MTHCA_FLAG_SRQ;

if (mthca_is_memfree(mdev))
if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

return 0;
}

static int mthca_init_tavor(struct mthca_dev *mdev)
{
s64 size;
u8 status;
int err;
struct mthca_dev_lim dev_lim;
@ -324,9 +329,11 @@ static int mthca_init_tavor(struct mthca_dev *mdev)
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
profile.num_srq = dev_lim.max_srqs;

err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if (err < 0)
size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if (size < 0) {
err = size;
goto err_disable;
}

err = mthca_INIT_HCA(mdev, &init_hca, &status);
if (err) {
@ -605,7 +612,7 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
struct mthca_dev_lim dev_lim;
struct mthca_profile profile;
struct mthca_init_hca_param init_hca;
u64 icm_size;
s64 icm_size;
u8 status;
int err;

@ -653,7 +660,7 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
profile.num_srq = dev_lim.max_srqs;

icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if ((int) icm_size < 0) {
if (icm_size < 0) {
err = icm_size;
goto err_stop_fw;
}

@ -359,12 +359,14 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
int use_lowmem, int use_coherent)
{
struct mthca_icm_table *table;
int obj_per_chunk;
int num_icm;
unsigned chunk_size;
int i;
u8 status;

num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;
obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
if (!table)
@ -412,7 +414,7 @@ err:
if (table->icm[i]) {
mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
&status);
&status);
mthca_free_icm(dev, table->icm[i], table->coherent);
}

@ -63,7 +63,7 @@ enum {
MTHCA_NUM_PDS = 1 << 15
};

u64 mthca_make_profile(struct mthca_dev *dev,
s64 mthca_make_profile(struct mthca_dev *dev,
struct mthca_profile *request,
struct mthca_dev_lim *dev_lim,
struct mthca_init_hca_param *init_hca)
@ -77,7 +77,7 @@ u64 mthca_make_profile(struct mthca_dev *dev,
};

u64 mem_base, mem_avail;
u64 total_size = 0;
s64 total_size = 0;
struct mthca_resource *profile;
struct mthca_resource tmp;
int i, j;

@ -53,7 +53,7 @@ struct mthca_profile {
int fmr_reserved_mtts;
};

u64 mthca_make_profile(struct mthca_dev *mdev,
s64 mthca_make_profile(struct mthca_dev *mdev,
struct mthca_profile *request,
struct mthca_dev_lim *dev_lim,
struct mthca_init_hca_param *init_hca);

@ -60,7 +60,7 @@ static int mthca_query_device(struct ib_device *ibdev,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
struct mthca_dev* mdev = to_mdev(ibdev);
struct mthca_dev *mdev = to_mdev(ibdev);

u8 status;

@ -540,6 +540,9 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
struct mthca_qp *qp;
int err;

if (init_attr->create_flags)
return ERR_PTR(-EINVAL);

switch (init_attr->qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:

@ -437,29 +437,34 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
int mthca_state;
u8 status;

mutex_lock(&qp->mutex);

if (qp->state == IB_QPS_RESET) {
qp_attr->qp_state = IB_QPS_RESET;
goto done;
}

mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto out;
}

err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
if (err)
goto out;
goto out_mailbox;
if (status) {
mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
err = -EINVAL;
goto out;
goto out_mailbox;
}

qp_param = mailbox->buf;
context = &qp_param->context;
mthca_state = be32_to_cpu(context->flags) >> 28;

qp_attr->qp_state = to_ib_qp_state(mthca_state);
qp->state = to_ib_qp_state(mthca_state);
qp_attr->qp_state = qp->state;
qp_attr->path_mtu = context->mtu_msgmax >> 5;
qp_attr->path_mig_state =
to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
@ -506,8 +511,11 @@ done:

qp_init_attr->cap = qp_attr->cap;

out:
out_mailbox:
mthca_free_mailbox(dev, mailbox);

out:
mutex_unlock(&qp->mutex);
return err;
}

@ -1532,7 +1540,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
case IB_WR_SEND_WITH_IMM:
sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
sqp->ud_header.immediate_present = 1;
sqp->ud_header.immediate_data = wr->imm_data;
sqp->ud_header.immediate_data = wr->ex.imm_data;
break;
default:
return -EINVAL;
@ -1679,7 +1687,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
cpu_to_be32(1);
if (wr->opcode == IB_WR_SEND_WITH_IMM ||
wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

wqe += sizeof (struct mthca_next_seg);
size = sizeof (struct mthca_next_seg) / 16;
@ -2015,10 +2023,12 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
((wr->send_flags & IB_SEND_SOLICITED) ?
cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
((wr->send_flags & IB_SEND_IP_CSUM) ?
cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
cpu_to_be32(1);
if (wr->opcode == IB_WR_SEND_WITH_IMM ||
wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
((struct mthca_next_seg *) wqe)->imm = wr->imm_data;
((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

wqe += sizeof (struct mthca_next_seg);
size = sizeof (struct mthca_next_seg) / 16;

@ -38,14 +38,16 @@
#include <linux/types.h>

enum {
MTHCA_NEXT_DBD = 1 << 7,
MTHCA_NEXT_FENCE = 1 << 6,
MTHCA_NEXT_CQ_UPDATE = 1 << 3,
MTHCA_NEXT_EVENT_GEN = 1 << 2,
MTHCA_NEXT_SOLICIT = 1 << 1,
MTHCA_NEXT_DBD = 1 << 7,
MTHCA_NEXT_FENCE = 1 << 6,
MTHCA_NEXT_CQ_UPDATE = 1 << 3,
MTHCA_NEXT_EVENT_GEN = 1 << 2,
MTHCA_NEXT_SOLICIT = 1 << 1,
MTHCA_NEXT_IP_CSUM = 1 << 4,
MTHCA_NEXT_TCP_UDP_CSUM = 1 << 5,

MTHCA_MLX_VL15 = 1 << 17,
MTHCA_MLX_SLR = 1 << 16
MTHCA_MLX_VL15 = 1 << 17,
MTHCA_MLX_SLR = 1 << 16
};

enum {

@ -65,7 +65,6 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

int max_mtu = 9000;
int nics_per_function = 1;
int interrupt_mod_interval = 0;

@ -93,15 +92,9 @@ module_param_named(debug_level, nes_debug_level, uint, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug output level");

LIST_HEAD(nes_adapter_list);
LIST_HEAD(nes_dev_list);
static LIST_HEAD(nes_dev_list);

atomic_t qps_destroyed;
atomic_t cqp_reqs_allocated;
atomic_t cqp_reqs_freed;
atomic_t cqp_reqs_dynallocated;
atomic_t cqp_reqs_dynfreed;
atomic_t cqp_reqs_queued;
atomic_t cqp_reqs_redriven;

static void nes_print_macaddr(struct net_device *netdev);
static irqreturn_t nes_interrupt(int, void *);
@ -310,7 +303,7 @@ void nes_rem_ref(struct ib_qp *ibqp)

if (atomic_read(&nesqp->refcount) == 0) {
printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
__FUNCTION__, ibqp->qp_num, nesqp->last_aeq);
__func__, ibqp->qp_num, nesqp->last_aeq);
BUG();
}

@ -751,13 +744,13 @@ static void __devexit nes_remove(struct pci_dev *pcidev)

list_del(&nesdev->list);
nes_destroy_cqp(nesdev);

free_irq(pcidev->irq, nesdev);
tasklet_kill(&nesdev->dpc_tasklet);

/* Deallocate the Adapter Structure */
nes_destroy_adapter(nesdev->nesadapter);

free_irq(pcidev->irq, nesdev);

if (nesdev->msi_enabled) {
pci_disable_msi(pcidev);
}

@ -143,12 +143,12 @@
#ifdef CONFIG_INFINIBAND_NES_DEBUG
#define nes_debug(level, fmt, args...) \
if (level & nes_debug_level) \
printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args)
printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args)

#define assert(expr) \
if (!(expr)) { \
printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
#expr, __FILE__, __FUNCTION__, __LINE__); \
#expr, __FILE__, __func__, __LINE__); \
}

#define NES_EVENT_TIMEOUT 1200000
@ -166,7 +166,6 @@ if (!(expr)) { \
#include "nes_cm.h"

extern int max_mtu;
extern int nics_per_function;
#define max_frame_len (max_mtu+ETH_HLEN)
extern int interrupt_mod_interval;
extern int nes_if_count;
@ -177,9 +176,6 @@ extern unsigned int nes_drv_opt;
extern unsigned int nes_debug_level;

extern struct list_head nes_adapter_list;
extern struct list_head nes_dev_list;

extern struct nes_cm_core *g_cm_core;

extern atomic_t cm_connects;
extern atomic_t cm_accepts;
@ -209,7 +205,6 @@ extern atomic_t cm_nodes_destroyed;
extern atomic_t cm_accel_dropped_pkts;
extern atomic_t cm_resets_recvd;

extern u32 crit_err_count;
extern u32 int_mod_timer_init;
extern u32 int_mod_cq_depth_256;
extern u32 int_mod_cq_depth_128;
@ -219,14 +214,6 @@ extern u32 int_mod_cq_depth_16;
extern u32 int_mod_cq_depth_4;
extern u32 int_mod_cq_depth_1;

extern atomic_t cqp_reqs_allocated;
extern atomic_t cqp_reqs_freed;
extern atomic_t cqp_reqs_dynallocated;
extern atomic_t cqp_reqs_dynfreed;
extern atomic_t cqp_reqs_queued;
extern atomic_t cqp_reqs_redriven;

struct nes_device {
struct nes_adapter *nesadapter;
void __iomem *regs;
@ -412,7 +399,7 @@ static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
if (resource_num >= max_resources) {
resource_num = find_first_zero_bit(resource_array, max_resources);
if (resource_num >= max_resources) {
printk(KERN_ERR PFX "%s: No available resourcess.\n", __FUNCTION__);
printk(KERN_ERR PFX "%s: No available resourcess.\n", __func__);
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
return -EMFILE;
}
@ -510,9 +497,6 @@ struct ib_qp *nes_get_qp(struct ib_device *, int);
/* nes_hw.c */
struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
void nes_nic_init_timer_defaults(struct nes_device *, u8);
unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *);
int nes_init_serdes(struct nes_device *, u8, u8, u8);
void nes_init_csr_ne020(struct nes_device *, u8, u8);
void nes_destroy_adapter(struct nes_adapter *);
int nes_init_cqp(struct nes_device *);
int nes_init_phy(struct nes_device *);
@ -520,20 +504,12 @@ int nes_init_nic_qp(struct nes_device *, struct net_device *);
void nes_destroy_nic_qp(struct nes_vnic *);
int nes_napi_isr(struct nes_device *);
void nes_dpc(unsigned long);
void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *);
void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *);
void nes_process_mac_intr(struct nes_device *, u32);
void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *);
void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *);
void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
int nes_destroy_cqp(struct nes_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);

/* nes_nic.c */
void nes_netdev_set_multicast_list(struct net_device *);
void nes_netdev_exit(struct nes_vnic *);
struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
void nes_netdev_destroy(struct net_device *);
int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
@ -544,7 +520,6 @@ int nes_cm_recv(struct sk_buff *, struct net_device *);
void nes_update_arp(unsigned char *, u32, u32, u16, u16);
void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
void nes_sock_release(struct nes_qp *, unsigned long *);
struct nes_cm_core *nes_cm_alloc_core(void);
void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
int nes_cm_disconn(struct nes_qp *);
@ -556,7 +531,6 @@ int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
struct nes_ib_device *nes_init_ofa_device(struct net_device *);
void nes_destroy_ofa_device(struct nes_ib_device *);
int nes_register_ofa_device(struct nes_ib_device *);
void nes_unregister_ofa_device(struct nes_ib_device *);

/* nes_util.c */
int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);

@ -80,7 +80,30 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
static int add_ref_cm_node(struct nes_cm_node *);
static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
static struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
void *, u32, void *, u32, u8);
static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node);

static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *,
struct nes_vnic *,
struct ietf_mpa_frame *,
struct nes_cm_info *);
static int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *,
struct nes_cm_node *);
static int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *,
struct nes_cm_node *);
static int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
static int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *,
struct sk_buff *);
static int mini_cm_dealloc_core(struct nes_cm_core *);
static int mini_cm_get(struct nes_cm_core *);
static int mini_cm_set(struct nes_cm_core *, u32, u32);
static int nes_cm_disconn_true(struct nes_qp *);
static int nes_cm_post_event(struct nes_cm_event *event);
static int nes_disconnect(struct nes_qp *nesqp, int abrupt);
static void nes_disconnect_worker(struct work_struct *work);
static int send_ack(struct nes_cm_node *cm_node);
static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb);

/* External CM API Interface */
/* instance of function pointers for client API */
@ -99,7 +122,7 @@ static struct nes_cm_ops nes_cm_api = {
mini_cm_set
};

struct nes_cm_core *g_cm_core;
static struct nes_cm_core *g_cm_core;

atomic_t cm_connects;
atomic_t cm_accepts;
@ -149,7 +172,7 @@ static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
/**
* send_mpa_request
*/
int send_mpa_request(struct nes_cm_node *cm_node)
static int send_mpa_request(struct nes_cm_node *cm_node)
{
struct sk_buff *skb;
int ret;
@ -243,8 +266,9 @@ static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb
* form_cm_frame - get a free packet and build empty frame Use
* node info to build.
*/
struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
void *options, u32 optionsize, void *data, u32 datasize, u8 flags)
static struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
void *options, u32 optionsize, void *data,
u32 datasize, u8 flags)
{
struct tcphdr *tcph;
struct iphdr *iph;
@ -342,7 +366,6 @@ static void print_core(struct nes_cm_core *core)
if (!core)
return;
nes_debug(NES_DBG_CM, "---------------------------------------------\n");
nes_debug(NES_DBG_CM, "Session ID : %u \n", atomic_read(&core->session_id));

nes_debug(NES_DBG_CM, "State : %u \n", core->state);

@ -395,7 +418,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
}

if (type == NES_TIMER_TYPE_SEND) {
new_send->seq_num = htonl(tcp_hdr(skb)->seq);
new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
atomic_inc(&new_send->skb->users);

ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
@ -420,7 +443,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}
if (type == NES_TIMER_TYPE_RECV) {
new_send->seq_num = htonl(tcp_hdr(skb)->seq);
new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
new_send->timetosend = jiffies;
spin_lock_irqsave(&cm_node->recv_list_lock, flags);
list_add_tail(&new_send->list, &cm_node->recv_list);
@ -442,7 +465,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
/**
* nes_cm_timer_tick
*/
void nes_cm_timer_tick(unsigned long pass)
static void nes_cm_timer_tick(unsigned long pass)
{
unsigned long flags, qplockflags;
unsigned long nexttimeout = jiffies + NES_LONG_TIME;
@ -644,7 +667,7 @@ void nes_cm_timer_tick(unsigned long pass)
/**
* send_syn
*/
int send_syn(struct nes_cm_node *cm_node, u32 sendack)
static int send_syn(struct nes_cm_node *cm_node, u32 sendack)
{
int ret;
int flags = SET_SYN;
@ -710,7 +733,7 @@ int send_syn(struct nes_cm_node *cm_node, u32 sendack)
/**
* send_reset
*/
int send_reset(struct nes_cm_node *cm_node)
static int send_reset(struct nes_cm_node *cm_node)
{
int ret;
struct sk_buff *skb = get_free_pkt(cm_node);
@ -732,7 +755,7 @@ int send_reset(struct nes_cm_node *cm_node)
/**
* send_ack
*/
int send_ack(struct nes_cm_node *cm_node)
static int send_ack(struct nes_cm_node *cm_node)
{
int ret;
struct sk_buff *skb = get_free_pkt(cm_node);
@ -752,7 +775,7 @@ int send_ack(struct nes_cm_node *cm_node)
/**
* send_fin
*/
int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
static int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
{
int ret;

@ -775,7 +798,7 @@ int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
/**
* get_free_pkt
*/
struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
static struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
{
struct sk_buff *skb, *new_skb;

@ -820,7 +843,6 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
{
unsigned long flags;
u32 hashkey;
struct list_head *list_pos;
struct list_head *hte;
struct nes_cm_node *cm_node;

@ -835,8 +857,7 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,

/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->ht_lock, flags);
list_for_each(list_pos, hte) {
cm_node = container_of(list_pos, struct nes_cm_node, list);
list_for_each_entry(cm_node, hte, list) {
/* compare quad, return node handle if a match */
nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
cm_node->loc_addr, cm_node->loc_port,
@ -864,13 +885,11 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
{
unsigned long flags;
struct list_head *listen_list;
struct nes_cm_listener *listen_node;

/* walk list and find cm_node associated with this session ID */
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
list_for_each(listen_list, &cm_core->listen_list.list) {
listen_node = container_of(listen_list, struct nes_cm_listener, list);
list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
/* compare node pair, return node handle if a match */
if (((listen_node->loc_addr == dst_addr) ||
listen_node->loc_addr == 0x00000000) &&
@ -1014,7 +1033,7 @@ static void nes_addr_send_arp(u32 dst_ip)
fl.nl_u.ip4_u.daddr = htonl(dst_ip);
if (ip_route_output_key(&init_net, &rt, &fl)) {
printk("%s: ip_route_output_key failed for 0x%08X\n",
__FUNCTION__, dst_ip);
__func__, dst_ip);
return;
}

@ -1077,8 +1096,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->tcp_cntxt.rcv_nxt = 0;
/* get a unique session ID , add thread_id to an upcounter to handle race */
atomic_inc(&cm_core->node_cnt);
atomic_inc(&cm_core->session_id);
cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
cm_node->conn_type = cm_info->conn_type;
cm_node->apbvt_set = 0;
cm_node->accept_pend = 0;
@ -1239,7 +1256,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
continue;
case OPTION_NUMBER_MSS:
nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
__FUNCTION__,
__func__,
all_options->as_mss.length, offset, optionsize);
got_mss_option = 1;
if (all_options->as_mss.length != 4) {
@ -1272,8 +1289,8 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 opti
/**
* process_packet
*/
int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
struct nes_cm_core *cm_core)
static int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
struct nes_cm_core *cm_core)
{
int optionsize;
int datasize;
@ -1360,7 +1377,7 @@ int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
if (optionsize) {
u8 *optionsloc = (u8 *)&tcph[1];
if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __FUNCTION__, cm_node);
nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __func__, cm_node);
send_reset(cm_node);
if (cm_node->state != NES_CM_STATE_SYN_SENT)
rem_ref_cm_node(cm_core, cm_node);
@ -1605,9 +1622,7 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
listener->cm_core = cm_core;
listener->nesvnic = nesvnic;
atomic_inc(&cm_core->node_cnt);
atomic_inc(&cm_core->session_id);

listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
listener->conn_type = cm_info->conn_type;
listener->backlog = cm_info->backlog;
listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;
@ -1631,9 +1646,10 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
/**
* mini_cm_connect - make a connection node with params
*/
struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame,
struct nes_cm_info *cm_info)
static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
struct nes_vnic *nesvnic,
struct ietf_mpa_frame *mpa_frame,
struct nes_cm_info *cm_info)
{
int ret = 0;
struct nes_cm_node *cm_node;
@ -1717,8 +1733,8 @@ struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
* mini_cm_accept - accept a connection
* This function is never called
*/
int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
struct nes_cm_node *cm_node)
static int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
struct nes_cm_node *cm_node)
{
return 0;
}
@ -1727,9 +1743,9 @@ int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame
/**
* mini_cm_reject - reject and teardown a connection
*/
int mini_cm_reject(struct nes_cm_core *cm_core,
struct ietf_mpa_frame *mpa_frame,
struct nes_cm_node *cm_node)
static int mini_cm_reject(struct nes_cm_core *cm_core,
struct ietf_mpa_frame *mpa_frame,
struct nes_cm_node *cm_node)
{
int ret = 0;
struct sk_buff *skb;
@ -1761,7 +1777,7 @@ int mini_cm_reject(struct nes_cm_core *cm_core,
/**
* mini_cm_close
*/
int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
{
int ret = 0;

@ -1808,8 +1824,8 @@ int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
* recv_pkt - recv an ETHERNET packet, and process it through CM
* node state machine
*/
int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
struct sk_buff *skb)
static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
struct sk_buff *skb)
{
struct nes_cm_node *cm_node = NULL;
struct nes_cm_listener *listener = NULL;
@ -1898,7 +1914,7 @@ int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
/**
* nes_cm_alloc_core - allocate a top level instance of a cm core
*/
struct nes_cm_core *nes_cm_alloc_core(void)
static struct nes_cm_core *nes_cm_alloc_core(void)
{
int i;

@ -1919,7 +1935,6 @@ struct nes_cm_core *nes_cm_alloc_core(void)
cm_core->state = NES_CM_STATE_INITED;
cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;

atomic_set(&cm_core->session_id, 0);
atomic_set(&cm_core->events_posted, 0);

/* init the packet lists */
@ -1958,7 +1973,7 @@ struct nes_cm_core *nes_cm_alloc_core(void)
/**
* mini_cm_dealloc_core - deallocate a top level instance of a cm core
*/
int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
static int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
{
nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);

@ -1983,7 +1998,7 @@ int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
/**
* mini_cm_get
*/
int mini_cm_get(struct nes_cm_core *cm_core)
static int mini_cm_get(struct nes_cm_core *cm_core)
{
return cm_core->state;
}
@ -1992,7 +2007,7 @@ int mini_cm_get(struct nes_cm_core *cm_core)
/**
* mini_cm_set
*/
int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
static int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
{
int ret = 0;

@ -2109,7 +2124,7 @@ int nes_cm_disconn(struct nes_qp *nesqp)
/**
* nes_disconnect_worker
*/
void nes_disconnect_worker(struct work_struct *work)
static void nes_disconnect_worker(struct work_struct *work)
{
struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);

@ -2122,7 +2137,7 @@ void nes_disconnect_worker(struct work_struct *work)
/**
* nes_cm_disconn_true
*/
int nes_cm_disconn_true(struct nes_qp *nesqp)
static int nes_cm_disconn_true(struct nes_qp *nesqp)
{
unsigned long flags;
int ret = 0;
@ -2265,7 +2280,7 @@ int nes_cm_disconn_true(struct nes_qp *nesqp)
/**
* nes_disconnect
*/
int nes_disconnect(struct nes_qp *nesqp, int abrupt)
static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
{
int ret = 0;
struct nes_vnic *nesvnic;
@ -2482,7 +2497,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
if (ret)
printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
__FUNCTION__, __LINE__, ret);
__func__, __LINE__, ret);

return 0;
}
@ -2650,7 +2665,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
if (!cm_node) {
printk("%s[%u] Error returned from listen API call\n",
__FUNCTION__, __LINE__);
__func__, __LINE__);
return -ENOMEM;
}

@ -2740,7 +2755,7 @@ int nes_cm_stop(void)
* cm_event_connected
* handle a connected event, setup QPs and HW
*/
void cm_event_connected(struct nes_cm_event *event)
static void cm_event_connected(struct nes_cm_event *event)
{
u64 u64temp;
struct nes_qp *nesqp;
@ -2864,7 +2879,7 @@ void cm_event_connected(struct nes_cm_event *event)

if (ret)
printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
__FUNCTION__, __LINE__, ret);
__func__, __LINE__, ret);
nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
nesqp->hwqp.qp_id, jiffies );

@ -2877,7 +2892,7 @@ void cm_event_connected(struct nes_cm_event *event)
/**
* cm_event_connect_error
*/
void cm_event_connect_error(struct nes_cm_event *event)
static void cm_event_connect_error(struct nes_cm_event *event)
{
struct nes_qp *nesqp;
struct iw_cm_id *cm_id;
@ -2919,7 +2934,7 @@ void cm_event_connect_error(struct nes_cm_event *event)
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
if (ret)
printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
__FUNCTION__, __LINE__, ret);
__func__, __LINE__, ret);
nes_rem_ref(&nesqp->ibqp);
cm_id->rem_ref(cm_id);

@ -2930,7 +2945,7 @@ void cm_event_connect_error(struct nes_cm_event *event)
/**
* cm_event_reset
*/
void cm_event_reset(struct nes_cm_event *event)
static void cm_event_reset(struct nes_cm_event *event)
{
struct nes_qp *nesqp;
struct iw_cm_id *cm_id;
@ -2973,7 +2988,7 @@ void cm_event_reset(struct nes_cm_event *event)
/**
* cm_event_mpa_req
*/
void cm_event_mpa_req(struct nes_cm_event *event)
static void cm_event_mpa_req(struct nes_cm_event *event)
{
struct iw_cm_id *cm_id;
struct iw_cm_event cm_event;
@ -3007,7 +3022,7 @@ void cm_event_mpa_req(struct nes_cm_event *event)
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
__FUNCTION__, __LINE__, ret);
__func__, __LINE__, ret);

return;
}
@ -3019,7 +3034,7 @@ static void nes_cm_event_handler(struct work_struct *);
* nes_cm_post_event
* post an event to the cm event handler
*/
int nes_cm_post_event(struct nes_cm_event *event)
static int nes_cm_post_event(struct nes_cm_event *event)
{
atomic_inc(&event->cm_node->cm_core->events_posted);
add_ref_cm_node(event->cm_node);

@ -225,7 +225,6 @@ enum nes_cm_listener_state {

struct nes_cm_listener {
struct list_head list;
u64 session_id;
struct nes_cm_core *cm_core;
u8 loc_mac[ETH_ALEN];
nes_addr_t loc_addr;
@ -242,7 +241,6 @@ struct nes_cm_listener {

/* per connection node and node state information */
struct nes_cm_node {
u64 session_id;
u32 hashkey;

nes_addr_t loc_addr, rem_addr;
@ -327,7 +325,6 @@ struct nes_cm_event {

struct nes_cm_core {
enum nes_cm_node_state state;
atomic_t session_id;

atomic_t listen_node_cnt;
struct nes_cm_node listen_list;
@ -383,35 +380,10 @@ struct nes_cm_ops {
int (*set)(struct nes_cm_core *, u32, u32);
};

int send_mpa_request(struct nes_cm_node *);
struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
void *, u32, void *, u32, u8);
int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
enum nes_timer_type, int, int);
void nes_cm_timer_tick(unsigned long);
int send_syn(struct nes_cm_node *, u32);
int send_reset(struct nes_cm_node *);
int send_ack(struct nes_cm_node *);
int send_fin(struct nes_cm_node *, struct sk_buff *);
struct sk_buff *get_free_pkt(struct nes_cm_node *);
int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);

struct nes_cm_node * mini_cm_connect(struct nes_cm_core *,
struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *);
int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *);
int mini_cm_dealloc_core(struct nes_cm_core *);
int mini_cm_get(struct nes_cm_core *);
int mini_cm_set(struct nes_cm_core *, u32, u32);

int nes_cm_disconn(struct nes_qp *);
void nes_disconnect_worker(struct work_struct *);
int nes_cm_disconn_true(struct nes_qp *);
int nes_disconnect(struct nes_qp *, int);

int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
int nes_reject(struct iw_cm_id *, const void *, u8);
@ -423,11 +395,4 @@ int nes_cm_recv(struct sk_buff *, struct net_device *);
int nes_cm_start(void);
int nes_cm_stop(void);

/* CM event handler functions */
void cm_event_connected(struct nes_cm_event *);
void cm_event_connect_error(struct nes_cm_event *);
void cm_event_reset(struct nes_cm_event *);
void cm_event_mpa_req(struct nes_cm_event *);
int nes_cm_post_event(struct nes_cm_event *);

#endif /* NES_CM_H */

@ -41,7 +41,7 @@

#include "nes.h"

u32 crit_err_count = 0;
static u32 crit_err_count;
u32 int_mod_timer_init;
u32 int_mod_cq_depth_256;
u32 int_mod_cq_depth_128;
@ -53,6 +53,17 @@ u32 int_mod_cq_depth_1;

#include "nes_cm.h"

static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq);
static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count);
static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
u8 OneG_Mode);
static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq);
static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq);
static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_hw_aeqe *aeqe);
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);

#ifdef CONFIG_INFINIBAND_NES_DEBUG
static unsigned char *nes_iwarp_state_str[] = {
@ -370,7 +381,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->et_use_adaptive_rx_coalesce = 1;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
nesadapter->et_rx_coalesce_usecs_irq = 0;
printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __FUNCTION__);
printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __func__);
}
/* Setup and enable the periodic timer */
if (nesadapter->et_rx_coalesce_usecs_irq)
@ -382,7 +393,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
nesadapter->base_pd = 1;

nesadapter->device_cap_flags =
IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW;
IB_DEVICE_ZERO_STAG | IB_DEVICE_MEM_WINDOW;

nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
[(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
@ -572,7 +583,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
if (vendor_id == 0xffff)
break;
}
nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __FUNCTION__,
nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __func__,
func_index, pci_name(nesdev->pcidev));
nesadapter->adapter_fcn_count = func_index;

@ -583,7 +594,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
/**
* nes_reset_adapter_ne020
*/
unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
{
u32 port_count;
u32 u32temp;
@ -691,7 +702,8 @@ unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
/**
* nes_init_serdes
*/
int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 OneG_Mode)
static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
u8 OneG_Mode)
{
int i;
u32 u32temp;
@ -739,7 +751,7 @@ int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 One
& 0x0000000f)) != 0x0000000f) && (i++ < 5000))
mdelay(1);
if (i >= 5000) {
printk("%s: Init: serdes 1 not ready, status=%x\n", __FUNCTION__, u32temp);
printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
/* return 1; */
}
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
@ -760,7 +772,7 @@ int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 One
* nes_init_csr_ne020
* Initialize registers for ne020 hardware
*/
void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
{
u32 u32temp;

@ -1204,7 +1216,7 @@ int nes_init_phy(struct nes_device *nesdev)
if (nesadapter->OneG_Mode) {
nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
printk(PFX "%s: Programming mdc config for 1G\n", __FUNCTION__);
printk(PFX "%s: Programming mdc config for 1G\n", __func__);
tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
tx_config |= 0x04;
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
@ -1358,7 +1370,7 @@ static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
static void nes_rq_wqes_timeout(unsigned long parm)
{
struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
printk("%s: Timer fired.\n", __FUNCTION__);
printk("%s: Timer fired.\n", __func__);
atomic_set(&nesvnic->rx_skb_timer_running, 0);
if (atomic_read(&nesvnic->rx_skbs_needed))
nes_replenish_nic_rq(nesvnic);
@ -1909,7 +1921,7 @@ void nes_dpc(unsigned long param)
/**
* nes_process_ceq
*/
void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
{
u64 u64temp;
struct nes_hw_cq *cq;
@ -1949,7 +1961,7 @@ void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
/**
* nes_process_aeq
*/
void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
{
// u64 u64temp;
u32 head;
@ -2060,7 +2072,7 @@ static void nes_reset_link(struct nes_device *nesdev, u32 mac_index)
/**
* nes_process_mac_intr
*/
void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
{
unsigned long flags;
u32 pcs_control_status;
@ -2163,7 +2175,7 @@ void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
temp_phy_data = phy_data;
} while (1);
nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
__FUNCTION__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
__func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");

} else {
phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0;
@ -2205,7 +2217,7 @@ void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)

void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
{
struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);

@ -2428,7 +2440,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
/**
* nes_cqp_ce_handler
*/
void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
{
u64 u64temp;
unsigned long flags;
@ -2567,7 +2579,7 @@ void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
/**
* nes_process_iwarp_aeqe
*/
void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
struct nes_hw_aeqe *aeqe)
{
u64 context;
u64 aeqe_context = 0;
@ -2819,7 +2832,7 @@ void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
|
||||
le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
|
||||
if (resource_allocated) {
|
||||
printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
|
||||
__FUNCTION__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
|
||||
__func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
|
||||
}
|
||||
break;
|
||||
case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
|
||||
|