Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB/cm: Optimize stale connection detection
  IB/mthca: Set cleaned CQEs back to HW ownership when cleaning CQ
  IB/mthca: Fix posting >255 recv WRs for Tavor
  RDMA/cma: Add check to validate that cm_id is bound to a device
  RDMA/cma: Fix synchronization with device removal in cma_iw_handler
  RDMA/cma: Simplify device removal handling code
  IB/ehca: Disable scaling code by default, bump version number
  IB/ehca: Beautify sysfs attribute code and fix compiler warnings
  IB/ehca: Remove _irqsave, move #ifdef
  IB/ehca: Fix AQP0/1 QP number
  IB/ehca: Correctly set GRH mask bit in ehca_modify_qp()
  IB/ehca: Serialize hypervisor calls in ehca_register_mr()
  IB/ipath: Shadow the gpio_mask register
  IB/mlx4: Fix uninitialized spinlock for 32-bit archs
  mlx4_core: Remove unused doorbell_lock
  net: Trivial MLX4_DEBUG dependency fix.
commit de7860c3f3
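
A minimal userspace sketch of the pattern the RDMA/cma patches below introduce: the open-coded atomic_inc(&id_priv->dev_remove) / cma_release_remove() bookkeeping in the event handlers is replaced by cma_disable_remove() and cma_enable_remove(), so a handler only runs while the id is still in the expected state and device removal waits for handlers already in flight. The helper names mirror the diff, but the kernel's spinlock, atomic counter and wait queue are replaced here, purely as an illustrative assumption, by a pthread mutex, a plain counter and a condition variable.

/*
 * Userspace sketch of the cma_disable_remove()/cma_enable_remove()
 * pattern from the RDMA/cma changes in this merge.  Not kernel code:
 * pthread primitives stand in for the kernel spinlock, atomic counter
 * and wait queue.  Build with: cc -pthread example.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum cma_state { CMA_IDLE, CMA_LISTEN, CMA_CONNECT, CMA_DESTROYING };

struct id_priv {
	enum cma_state state;
	int dev_remove;              /* handlers currently inside the id */
	pthread_mutex_t lock;
	pthread_cond_t wait_remove;
};

/* Enter an event handler only if the id is still in the expected state. */
static int cma_disable_remove(struct id_priv *id, enum cma_state state)
{
	int ret;

	pthread_mutex_lock(&id->lock);
	if (id->state == state) {
		id->dev_remove++;
		ret = 0;
	} else
		ret = -EINVAL;
	pthread_mutex_unlock(&id->lock);
	return ret;
}

/* Leave the handler and wake anyone waiting to remove the device. */
static void cma_enable_remove(struct id_priv *id)
{
	pthread_mutex_lock(&id->lock);
	if (--id->dev_remove == 0)
		pthread_cond_broadcast(&id->wait_remove);
	pthread_mutex_unlock(&id->lock);
}

/* The device-removal path blocks until no handler is left inside the id. */
static void cma_wait_remove(struct id_priv *id)
{
	pthread_mutex_lock(&id->lock);
	while (id->dev_remove)
		pthread_cond_wait(&id->wait_remove, &id->lock);
	pthread_mutex_unlock(&id->lock);
}

int main(void)
{
	struct id_priv id = {
		.state       = CMA_CONNECT,
		.lock        = PTHREAD_MUTEX_INITIALIZER,
		.wait_remove = PTHREAD_COND_INITIALIZER,
	};

	if (!cma_disable_remove(&id, CMA_CONNECT)) {
		/* ... event handler body would run here ... */
		cma_enable_remove(&id);
	}

	cma_wait_remove(&id);   /* returns at once: nothing is in flight */
	printf("no handlers in flight; safe to remove the device\n");
	return 0;
}

In cma.c itself the same roles are played by id_priv->lock, the dev_remove atomic and the wait_remove wait queue, and the new cma_has_cm_dev() helper (also added below) is what rdma_notify(), rdma_reject() and rdma_disconnect() now use to check that the cm_id is bound to a device.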
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -346,12 +346,33 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 		complete(&id_priv->comp);
 }
 
-static void cma_release_remove(struct rdma_id_private *id_priv)
+static int cma_disable_remove(struct rdma_id_private *id_priv,
+			      enum cma_state state)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&id_priv->lock, flags);
+	if (id_priv->state == state) {
+		atomic_inc(&id_priv->dev_remove);
+		ret = 0;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&id_priv->lock, flags);
+	return ret;
+}
+
+static void cma_enable_remove(struct rdma_id_private *id_priv)
 {
 	if (atomic_dec_and_test(&id_priv->dev_remove))
 		wake_up(&id_priv->wait_remove);
 }
 
+static int cma_has_cm_dev(struct rdma_id_private *id_priv)
+{
+	return (id_priv->id.device && id_priv->cm_id.ib);
+}
+
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 				  void *context, enum rdma_port_space ps)
 {
@@ -884,9 +905,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_cm_event event;
 	int ret = 0;
 
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_CONNECT))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
 	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
@@ -942,12 +962,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -1057,11 +1077,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	atomic_inc(&listen_id->dev_remove);
-	if (!cma_comp(listen_id, CMA_LISTEN)) {
-		ret = -ECONNABORTED;
-		goto out;
-	}
+	if (cma_disable_remove(listen_id, CMA_LISTEN))
+		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
@@ -1101,11 +1118,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 
 release_conn_id:
 	cma_exch(conn_id, CMA_DESTROYING);
-	cma_release_remove(conn_id);
+	cma_enable_remove(conn_id);
 	rdma_destroy_id(&conn_id->id);
 
 out:
-	cma_release_remove(listen_id);
+	cma_enable_remove(listen_id);
 	return ret;
 }
 
@@ -1171,9 +1188,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	memset(&event, 0, sizeof event);
-	atomic_inc(&id_priv->dev_remove);
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
+	memset(&event, 0, sizeof event);
 	switch (iw_event->event) {
 	case IW_CM_EVENT_CLOSE:
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -1214,12 +1232,12 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -1234,11 +1252,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	int ret;
 
 	listen_id = cm_id->context;
-	atomic_inc(&listen_id->dev_remove);
-	if (!cma_comp(listen_id, CMA_LISTEN)) {
-		ret = -ECONNABORTED;
-		goto out;
-	}
+	if (cma_disable_remove(listen_id, CMA_LISTEN))
+		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
@@ -1255,13 +1270,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
 		ret = -EADDRNOTAVAIL;
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
 	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
 	if (ret) {
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1270,7 +1285,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
 	if (ret) {
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1293,14 +1308,14 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
 		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(&conn_id->id);
 	}
 
 out:
 	if (dev)
 		dev_put(dev);
-	cma_release_remove(listen_id);
+	cma_enable_remove(listen_id);
 	return ret;
 }
 
@@ -1519,7 +1534,7 @@ static void cma_work_handler(struct work_struct *_work)
 		destroy = 1;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	cma_deref_id(id_priv);
 	if (destroy)
 		rdma_destroy_id(&id_priv->id);
@@ -1711,13 +1726,13 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	cma_deref_id(id_priv);
 }
 
@@ -2042,11 +2057,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	memset(&event, 0, sizeof event);
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_CONNECT))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
+	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
 	case IB_CM_SIDR_REQ_ERROR:
 		event.event = RDMA_CM_EVENT_UNREACHABLE;
@@ -2084,12 +2098,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -2413,7 +2427,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (id->device->node_type) {
@@ -2435,7 +2449,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2466,8 +2480,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT) &&
-	    !cma_comp(id_priv, CMA_DISCONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2499,10 +2512,9 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
+	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
+		return 0;
 
 	if (!status && id_priv->id.qp)
 		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
@@ -2524,12 +2536,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 
 	return 0;
 }

--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -277,6 +277,7 @@ void ehca_cleanup_mrmw_cache(void);
 
 extern spinlock_t ehca_qp_idr_lock;
 extern spinlock_t ehca_cq_idr_lock;
+extern spinlock_t hcall_lock;
 extern struct idr ehca_qp_idr;
 extern struct idr ehca_cq_idr;
 

--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -517,12 +517,11 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 		else {
 			struct ehca_cq *cq = eq->eqe_cache[i].cq;
 			comp_event_callback(cq);
-			spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+			spin_lock(&ehca_cq_idr_lock);
 			cq->nr_events--;
 			if (!cq->nr_events)
 				wake_up(&cq->wait_completion);
-			spin_unlock_irqrestore(&ehca_cq_idr_lock,
-					       flags);
+			spin_unlock(&ehca_cq_idr_lock);
 		}
 	} else {
 		ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -711,6 +710,7 @@ static void destroy_comp_task(struct ehca_comp_pool *pool,
 		kthread_stop(task);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void take_over_work(struct ehca_comp_pool *pool,
 			   int cpu)
 {
@@ -735,7 +735,6 @@ static void take_over_work(struct ehca_comp_pool *pool,
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int comp_pool_callback(struct notifier_block *nfb,
 			      unsigned long action,
 			      void *hcpu)

--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0022");
+MODULE_VERSION("SVNEHCA_0023");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -62,7 +62,7 @@ int ehca_use_hp_mr = 0;
 int ehca_port_act_time = 30;
 int ehca_poll_all_eqs = 1;
 int ehca_static_rate = -1;
-int ehca_scaling_code = 1;
+int ehca_scaling_code = 0;
 
 module_param_named(open_aqp1, ehca_open_aqp1, int, 0);
 module_param_named(debug_level, ehca_debug_level, int, 0);
@@ -98,6 +98,7 @@ MODULE_PARM_DESC(scaling_code,
 
 spinlock_t ehca_qp_idr_lock;
 spinlock_t ehca_cq_idr_lock;
+spinlock_t hcall_lock;
 DEFINE_IDR(ehca_qp_idr);
 DEFINE_IDR(ehca_cq_idr);
 
@@ -453,15 +454,14 @@ static ssize_t ehca_store_debug_level(struct device_driver *ddp,
 DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
 	    ehca_show_debug_level, ehca_store_debug_level);
 
-void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
-{
-	driver_create_file(&drv->driver, &driver_attr_debug_level);
-}
+static struct attribute *ehca_drv_attrs[] = {
+	&driver_attr_debug_level.attr,
+	NULL
+};
 
-void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
-{
-	driver_remove_file(&drv->driver, &driver_attr_debug_level);
-}
+static struct attribute_group ehca_drv_attr_grp = {
+	.attrs = ehca_drv_attrs
+};
 
 #define EHCA_RESOURCE_ATTR(name) \
 static ssize_t ehca_show_##name(struct device *dev, \
@@ -523,44 +523,28 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
 
+static struct attribute *ehca_dev_attrs[] = {
+	&dev_attr_adapter_handle.attr,
+	&dev_attr_num_ports.attr,
+	&dev_attr_hw_ver.attr,
+	&dev_attr_max_eq.attr,
+	&dev_attr_cur_eq.attr,
+	&dev_attr_max_cq.attr,
+	&dev_attr_cur_cq.attr,
+	&dev_attr_max_qp.attr,
+	&dev_attr_cur_qp.attr,
+	&dev_attr_max_mr.attr,
+	&dev_attr_cur_mr.attr,
+	&dev_attr_max_mw.attr,
+	&dev_attr_cur_mw.attr,
+	&dev_attr_max_pd.attr,
+	&dev_attr_max_ah.attr,
+	NULL
+};
 
-void ehca_create_device_sysfs(struct ibmebus_dev *dev)
-{
-	device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
-	device_create_file(&dev->ofdev.dev, &dev_attr_num_ports);
-	device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_eq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_cq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_qp);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_mr);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_mw);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_pd);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_ah);
-}
-
-void ehca_remove_device_sysfs(struct ibmebus_dev *dev)
-{
-	device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah);
-}
+static struct attribute_group ehca_dev_attr_grp = {
+	.attrs = ehca_dev_attrs
+};
 
 static int __devinit ehca_probe(struct ibmebus_dev *dev,
 				const struct of_device_id *id)
@@ -668,7 +652,10 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 		}
 	}
 
-	ehca_create_device_sysfs(dev);
+	ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+	if (ret) /* only complain; we can live without attributes */
+		ehca_err(&shca->ib_device,
+			 "Cannot create device attributes ret=%d", ret);
 
 	spin_lock(&shca_list_lock);
 	list_add(&shca->shca_list, &shca_list);
@@ -720,7 +707,7 @@ static int __devexit ehca_remove(struct ibmebus_dev *dev)
 	struct ehca_shca *shca = dev->ofdev.dev.driver_data;
 	int ret;
 
-	ehca_remove_device_sysfs(dev);
+	sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
 
 	if (ehca_open_aqp1 == 1) {
 		int i;
@@ -812,11 +799,12 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0022)\n");
+	       "(Rel.: SVNEHCA_0023)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
 	spin_lock_init(&ehca_cq_idr_lock);
+	spin_lock_init(&hcall_lock);
 
 	INIT_LIST_HEAD(&shca_list);
 	spin_lock_init(&shca_list_lock);
@@ -838,7 +826,9 @@ int __init ehca_module_init(void)
 		goto module_init2;
 	}
 
-	ehca_create_driver_sysfs(&ehca_driver);
+	ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
+	if (ret) /* only complain; we can live without attributes */
+		ehca_gen_err("Cannot create driver attributes ret=%d", ret);
 
 	if (ehca_poll_all_eqs != 1) {
 		ehca_gen_err("WARNING!!!");
@@ -865,7 +855,7 @@ void __exit ehca_module_exit(void)
 	if (ehca_poll_all_eqs == 1)
 		del_timer_sync(&poll_eqs_timer);
 
-	ehca_remove_driver_sysfs(&ehca_driver);
+	sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
 	ibmebus_unregister_driver(&ehca_driver);
 
 	ehca_destroy_slab_caches();

--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -523,6 +523,8 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 		goto create_qp_exit1;
 	}
 
+	my_qp->ib_qp.qp_num = my_qp->real_qp_num;
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 		if (isdaqp == 0) {
@@ -568,7 +570,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 			parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
 			parms.act_nr_send_sges = init_attr->cap.max_send_sge;
 			parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
-			my_qp->real_qp_num =
+			my_qp->ib_qp.qp_num =
 				(init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
 		}
 
@@ -595,7 +597,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	my_qp->ib_qp.recv_cq = init_attr->recv_cq;
 	my_qp->ib_qp.send_cq = init_attr->send_cq;
 
-	my_qp->ib_qp.qp_num = my_qp->real_qp_num;
 	my_qp->ib_qp.qp_type = init_attr->qp_type;
 
 	my_qp->qp_type = init_attr->qp_type;
@@ -968,17 +969,21 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 				((ehca_mult - 1) / ah_mult) : 0;
 		else
 			mqpcb->max_static_rate = 0;
 
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
 
+		/*
+		 * Always supply the GRH flag, even if it's zero, to give the
+		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
+		 */
+		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+
 		/*
 		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
 		 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
 		 */
 		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
-			mqpcb->send_grh_flag = 1 << 31;
-			update_mask |=
-				EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+			mqpcb->send_grh_flag = 1;
 
 			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
 			update_mask |=

--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -154,7 +154,8 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 			      unsigned long arg9)
 {
 	long ret;
-	int i, sleep_msecs;
+	int i, sleep_msecs, lock_is_set = 0;
+	unsigned long flags;
 
 	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
 		     "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
@@ -162,10 +163,18 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 		     arg8, arg9);
 
 	for (i = 0; i < 5; i++) {
+		if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) {
+			spin_lock_irqsave(&hcall_lock, flags);
+			lock_is_set = 1;
+		}
+
 		ret = plpar_hcall9(opcode, outs,
 				   arg1, arg2, arg3, arg4, arg5,
 				   arg6, arg7, arg8, arg9);
+
+		if (lock_is_set)
+			spin_unlock_irqrestore(&hcall_lock, flags);
+
 		if (H_IS_LONG_BUSY(ret)) {
 			sleep_msecs = get_longbusy_msecs(ret);
 			msleep_interruptible(sleep_msecs);
@@ -193,11 +202,11 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 			     opcode, ret, outs[0], outs[1], outs[2], outs[3],
 			     outs[4], outs[5], outs[6], outs[7], outs[8]);
 		return ret;
 	}
 
 	return H_BUSY;
 }
 
 u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_pfeq *pfeq,
 			     const u32 neq_control,

--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -747,7 +747,6 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
 
 static int ipath_pe_intconfig(struct ipath_devdata *dd)
 {
-	u64 val;
 	u32 chiprev;
 
 	/*
@@ -760,9 +759,9 @@ static int ipath_pe_intconfig(struct ipath_devdata *dd)
 	if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
 		/* Rev2+ reports extra errors via internal GPIO pins */
 		dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val |= IPATH_GPIO_ERRINTR_MASK;
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 	}
 	return 0;
 }

--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1056,7 +1056,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 		gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
 		chk0rcv = 1;
 	}
-	if (unlikely(gpiostatus)) {
+	if (gpiostatus) {
 		/*
 		 * Some unexpected bits remain. If they could have
 		 * caused the interrupt, complain and clear.
@@ -1065,9 +1065,8 @@ irqreturn_t ipath_intr(int irq, void *data)
 		 * GPIO interrupts, possibly on a "three strikes"
 		 * basis.
 		 */
-		u32 mask;
-		mask = ipath_read_kreg32(
-			dd, dd->ipath_kregs->kr_gpio_mask);
+		const u32 mask = (u32) dd->ipath_gpio_mask;
+
 		if (mask & gpiostatus) {
 			ipath_dbg("Unexpected GPIO IRQ bits %x\n",
 				  gpiostatus & mask);

--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -397,6 +397,8 @@ struct ipath_devdata {
 	unsigned long ipath_pioavailshadow[8];
 	/* shadow of kr_gpio_out, for rmw ops */
 	u64 ipath_gpio_out;
+	/* shadow the gpio mask register */
+	u64 ipath_gpio_mask;
 	/* kr_revision shadow */
 	u64 ipath_revision;
 	/*

--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1387,13 +1387,12 @@ static int enable_timer(struct ipath_devdata *dd)
 	 * processing.
 	 */
 	if (dd->ipath_flags & IPATH_GPIO_INTR) {
-		u64 val;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
 				 0x2074076542310ULL);
 		/* Enable GPIO bit 2 interrupt */
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 	}
 
 	init_timer(&dd->verbs_timer);
@@ -1412,8 +1411,9 @@ static int disable_timer(struct ipath_devdata *dd)
 		u64 val;
 		/* Disable GPIO bit 2 interrupt */
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 		/*
 		 * We might want to undo changes to debugportselect,
 		 * but how?

--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -489,6 +489,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!ibdev->uar_map)
 		goto err_uar;
+	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
 	INIT_LIST_HEAD(&ibdev->pgdir_list);
 	mutex_init(&ibdev->pgdir_mutex);

--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -284,7 +284,7 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 {
 	struct mthca_cqe *cqe;
 	u32 prod_index;
-	int nfreed = 0;
+	int i, nfreed = 0;
 
 	spin_lock_irq(&cq->lock);
 
@@ -321,6 +321,8 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 	}
 
 	if (nfreed) {
+		for (i = 0; i < nfreed; ++i)
+			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
 		wmb();
 		cq->cons_index += nfreed;
 		update_cons_index(dev, cq, nfreed);

--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1862,6 +1862,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			      dev->kar + MTHCA_RECEIVE_DOORBELL,
 			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
 
 		qp->rq.next_ind = ind;
+		qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
 		size0 = 0;
 	}

--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -257,10 +257,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 	cm_id->context = p;
 	p->jiffies = jiffies;
 	spin_lock_irq(&priv->lock);
+	if (list_empty(&priv->cm.passive_ids))
+		queue_delayed_work(ipoib_workqueue,
+				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	list_add(&p->list, &priv->cm.passive_ids);
 	spin_unlock_irq(&priv->lock);
-	queue_delayed_work(ipoib_workqueue,
-			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	return 0;
 
 err_rep:
@@ -378,8 +379,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		if (!list_empty(&p->list))
 			list_move(&p->list, &priv->cm.passive_ids);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		queue_delayed_work(ipoib_workqueue,
-				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	}
 }
 
@@ -1100,6 +1099,10 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 		kfree(p);
 		spin_lock_irq(&priv->lock);
 	}
+
+	if (!list_empty(&priv->cm.passive_ids))
+		queue_delayed_work(ipoib_workqueue,
+				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	spin_unlock_irq(&priv->lock);
 }
 

--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2508,6 +2508,7 @@ config MLX4_CORE
 
 config MLX4_DEBUG
 	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	depends on MLX4_CORE
 	default y
 	---help---
 	  This option causes debugging code to be compiled into the

--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -542,8 +542,6 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 
-	MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
-
 	err = mlx4_init_uar_table(dev);
 	if (err) {
 		mlx4_err(dev, "Failed to initialize "

--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -275,7 +275,6 @@ struct mlx4_priv {
 
 	struct mlx4_uar driver_uar;
 	void __iomem *kar;
-	MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock)
 
 	u32 rev_id;
 	char board_id[MLX4_BOARD_ID_LEN];