Merge branch 'hfi1' into k.o/for-next
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit 8206ceb096
@@ -9956,7 +9956,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
 		val = ppd->phy_error_threshold;
 		break;
 	case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
-		val = dd->link_default;
+		val = HLS_DEFAULT;
 		break;
 
 	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
@@ -10159,6 +10159,10 @@ static const char * const state_complete_reasons[] = {
 	[0x33] =
 		"Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
+	[0x34] = tx_out_of_policy,
+	[0x35] = "Negotiated link width is mutually exclusive",
+	[0x36] =
+		"Timed out before receiving verifycap frames in VerifyCap.Exchange",
 	[0x37] = "Unable to resolve secure data exchange",
 };
 
 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
@@ -10547,7 +10551,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
 
 	orig_new_state = state;
 	if (state == HLS_DN_DOWNDEF)
-		state = dd->link_default;
+		state = HLS_DEFAULT;
 
 	/* interpret poll -> poll as a link bounce */
 	poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
@@ -12925,7 +12929,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
 			if (!me->arg) /* => no irq, no affinity */
 				continue;
 			hfi1_put_irq_affinity(dd, me);
-			free_irq(me->irq, me->arg);
+			pci_free_irq(dd->pcidev, i, me->arg);
 		}
 
 		/* clean structures */
@@ -12935,7 +12939,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
 	} else {
 		/* INTx */
 		if (dd->requested_intx_irq) {
-			free_irq(dd->pcidev->irq, dd);
+			pci_free_irq(dd->pcidev, 0, dd);
 			dd->requested_intx_irq = 0;
 		}
 		disable_intx(dd->pcidev);
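The hunks above and below migrate the driver from bare request_irq()/free_irq() calls to the managed pci_request_irq()/pci_free_irq() helpers, which resolve a vector index to its Linux IRQ number internally and own the formatted irq name, so the per-entry name buffers can be deleted. A minimal sketch of the pattern, with a hypothetical foo_dev/foo_intr standing in for the hfi1 names (pci_request_irq() and pci_free_irq() themselves are the real <linux/pci.h> helpers):

#include <linux/pci.h>
#include <linux/interrupt.h>

/* hypothetical per-device state */
struct foo_dev {
	struct pci_dev *pdev;
};

static irqreturn_t foo_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_request_vec0(struct foo_dev *fdev)
{
	/*
	 * Vector index 0 works for MSI-X, MSI, and legacy INTx alike;
	 * the core looks up the IRQ via pci_irq_vector() and duplicates
	 * the formatted name, so no per-device name buffer (like the
	 * removed dd->intx_name / me->name) is needed.
	 */
	return pci_request_irq(fdev->pdev, 0, foo_intr, NULL, fdev,
			       "foo_%d", 0);
}

static void foo_free_vec0(struct foo_dev *fdev)
{
	pci_free_irq(fdev->pdev, 0, fdev);	/* dev_id must match */
}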
@@ -12994,10 +12998,8 @@ static int request_intx_irq(struct hfi1_devdata *dd)
 {
 	int ret;
 
-	snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
-		 dd->unit);
-	ret = request_irq(dd->pcidev->irq, general_interrupt,
-			  IRQF_SHARED, dd->intx_name, dd);
+	ret = pci_request_irq(dd->pcidev, 0, general_interrupt, NULL, dd,
+			      DRIVER_NAME "_%d", dd->unit);
 	if (ret)
 		dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
 			   ret);
@@ -13040,13 +13042,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 		int idx;
 		struct hfi1_ctxtdata *rcd = NULL;
 		struct sdma_engine *sde = NULL;
+		char name[MAX_NAME_SIZE];
 
-		/* obtain the arguments to request_irq */
+		/* obtain the arguments to pci_request_irq */
 		if (first_general <= i && i < last_general) {
 			idx = i - first_general;
 			handler = general_interrupt;
 			arg = dd;
-			snprintf(me->name, sizeof(me->name),
+			snprintf(name, sizeof(name),
 				 DRIVER_NAME "_%d", dd->unit);
 			err_info = "general";
 			me->type = IRQ_GENERAL;
@@ -13055,14 +13058,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 			sde = &dd->per_sdma[idx];
 			handler = sdma_interrupt;
 			arg = sde;
-			snprintf(me->name, sizeof(me->name),
+			snprintf(name, sizeof(name),
 				 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
 			err_info = "sdma";
 			remap_sdma_interrupts(dd, idx, i);
 			me->type = IRQ_SDMA;
 		} else if (first_rx <= i && i < last_rx) {
 			idx = i - first_rx;
-			rcd = hfi1_rcd_get_by_index(dd, idx);
+			rcd = hfi1_rcd_get_by_index_safe(dd, idx);
 			if (rcd) {
 				/*
 				 * Set the interrupt register and mask for this
@@ -13074,7 +13077,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 				handler = receive_context_interrupt;
 				thread = receive_context_thread;
 				arg = rcd;
-				snprintf(me->name, sizeof(me->name),
+				snprintf(name, sizeof(name),
 					 DRIVER_NAME "_%d kctxt%d",
 					 dd->unit, idx);
 				err_info = "receive context";
@@ -13095,18 +13098,10 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 		if (!arg)
 			continue;
 		/* make sure the name is terminated */
-		me->name[sizeof(me->name) - 1] = 0;
-		me->irq = pci_irq_vector(dd->pcidev, i);
-		/*
-		 * On err return me->irq.  Don't need to clear this
-		 * because 'arg' has not been set, and cleanup will
-		 * do the right thing.
-		 */
-		if (me->irq < 0)
-			return me->irq;
+		name[sizeof(name) - 1] = 0;
 
-		ret = request_threaded_irq(me->irq, handler, thread, 0,
-					   me->name, arg);
+		ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
+				      name);
 		if (ret) {
 			dd_dev_err(dd,
 				   "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
@@ -13114,7 +13109,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
 			return ret;
 		}
 		/*
-		 * assign arg after request_irq call, so it will be
+		 * assign arg after pci_request_irq call, so it will be
 		 * cleaned up
 		 */
 		me->arg = arg;
@@ -13132,7 +13127,7 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
 	int i;
 
 	if (!dd->num_msix_entries) {
-		synchronize_irq(dd->pcidev->irq);
+		synchronize_irq(pci_irq_vector(dd->pcidev, 0));
 		return;
 	}
 
@@ -13153,7 +13148,7 @@ void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
 		return;
 
 	hfi1_put_irq_affinity(dd, me);
-	free_irq(me->irq, me->arg);
+	pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
 
 	me->arg = NULL;
 }
@@ -13176,28 +13171,21 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
 	rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
 	rcd->imask = ((u64)1) <<
 		((IS_RCVAVAIL_START + idx) % 64);
 
-	snprintf(me->name, sizeof(me->name),
-		 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
-	me->name[sizeof(me->name) - 1] = 0;
 	me->type = IRQ_RCVCTXT;
-	me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
-	if (me->irq < 0) {
-		dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
-			   idx, me->irq);
-		return;
-	}
 	remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
 
-	ret = request_threaded_irq(me->irq, receive_context_interrupt,
-				   receive_context_thread, 0, me->name, arg);
+	ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
+			      receive_context_interrupt,
+			      receive_context_thread, arg,
+			      DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
 	if (ret) {
 		dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
 			   me->irq, idx, ret);
 		return;
 	}
 	/*
-	 * assign arg after request_irq call, so it will be
+	 * assign arg after pci_request_irq call, so it will be
 	 * cleaned up
 	 */
 	me->arg = arg;
@@ -13206,7 +13194,7 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
 	if (ret) {
 		dd_dev_err(dd,
			   "unable to pin IRQ %d\n", ret);
-		free_irq(me->irq, me->arg);
+		pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
 	}
 }
 
@@ -14907,8 +14895,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
 		init_vl_arb_caches(ppd);
 	}
 
-	dd->link_default = HLS_DN_POLL;
-
 	/*
 	 * Do remaining PCIe setup and save PCIe values in dd.
 	 * Any error printing is already done by the init code.
@@ -243,7 +243,7 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
 	spos = v;
 	i = *spos;
 
-	rcd = hfi1_rcd_get_by_index(dd, i);
+	rcd = hfi1_rcd_get_by_index_safe(dd, i);
 	if (!rcd)
 		return SEQ_SKIP;
 
@@ -402,7 +402,7 @@ static int _rcds_seq_show(struct seq_file *s, void *v)
 	loff_t *spos = v;
 	loff_t i = *spos;
 
-	rcd = hfi1_rcd_get_by_index(dd, i);
+	rcd = hfi1_rcd_get_by_index_safe(dd, i);
 	if (rcd)
 		seqfile_dump_rcd(s, rcd);
 	hfi1_rcd_put(rcd);
@@ -866,7 +866,7 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
 	 * interrupt handler for all statically allocated kernel contexts.
 	 */
 	if (ctxt >= dd->first_dyn_alloc_ctxt) {
-		rcd = hfi1_rcd_get_by_index(dd, ctxt);
+		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 		if (rcd) {
 			rcd->do_interrupt =
 				&handle_receive_interrupt_nodma_rtail;
@@ -895,7 +895,7 @@ static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
 	 * interrupt handler for all statically allocated kernel contexts.
 	 */
 	if (ctxt >= dd->first_dyn_alloc_ctxt) {
-		rcd = hfi1_rcd_get_by_index(dd, ctxt);
+		rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
 		if (rcd) {
 			rcd->do_interrupt =
 				&handle_receive_interrupt_dma_rtail;
@@ -78,16 +78,20 @@ static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
 static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
 
 static u64 kvirt_to_phys(void *addr);
-static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
 static void init_subctxts(struct hfi1_ctxtdata *uctxt,
			  const struct hfi1_user_info *uinfo);
 static int init_user_ctxt(struct hfi1_filedata *fd,
			  struct hfi1_ctxtdata *uctxt);
 static void user_init(struct hfi1_ctxtdata *uctxt);
-static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
-			 __u32 len);
-static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
-			 __u32 len);
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+			      u32 len);
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+			      u32 len);
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+				u32 len);
 static int setup_base_ctxt(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt);
 static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
@@ -101,10 +105,11 @@ static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
 static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
 static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
-			  unsigned long events);
-static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
+			  unsigned long arg);
+static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
+static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
-		       int start_stop);
+		       unsigned long arg);
 static int vma_fault(struct vm_fault *vmf);
 static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);
@@ -221,13 +226,8 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 {
 	struct hfi1_filedata *fd = fp->private_data;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	struct hfi1_user_info uinfo;
-	struct hfi1_tid_info tinfo;
 	int ret = 0;
-	unsigned long addr;
 	int uval = 0;
-	unsigned long ul_uval = 0;
-	u16 uval16 = 0;
 
 	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
 	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
@@ -237,171 +237,55 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 
 	switch (cmd) {
 	case HFI1_IOCTL_ASSIGN_CTXT:
-		if (uctxt)
-			return -EINVAL;
-
-		if (copy_from_user(&uinfo,
-				   (struct hfi1_user_info __user *)arg,
-				   sizeof(uinfo)))
-			return -EFAULT;
-
-		ret = assign_ctxt(fd, &uinfo);
+		ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_CTXT_INFO:
-		ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg,
-				    sizeof(struct hfi1_ctxt_info));
+		ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_USER_INFO:
-		ret = get_base_info(fd, (void __user *)(unsigned long)arg,
-				    sizeof(struct hfi1_base_info));
+		ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_CREDIT_UPD:
 		if (uctxt)
 			sc_return_credits(uctxt->sc);
 		break;
 
 	case HFI1_IOCTL_TID_UPDATE:
-		if (copy_from_user(&tinfo,
-				   (struct hfi11_tid_info __user *)arg,
-				   sizeof(tinfo)))
-			return -EFAULT;
-
-		ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
-		if (!ret) {
-			/*
-			 * Copy the number of tidlist entries we used
-			 * and the length of the buffer we registered.
-			 */
-			addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
-			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
-					 sizeof(tinfo.tidcnt)))
-				return -EFAULT;
-
-			addr = arg + offsetof(struct hfi1_tid_info, length);
-			if (copy_to_user((void __user *)addr, &tinfo.length,
-					 sizeof(tinfo.length)))
-				ret = -EFAULT;
-		}
+		ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_TID_FREE:
-		if (copy_from_user(&tinfo,
-				   (struct hfi11_tid_info __user *)arg,
-				   sizeof(tinfo)))
-			return -EFAULT;
-
-		ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
-		if (ret)
-			break;
-		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
-		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
-				 sizeof(tinfo.tidcnt)))
-			ret = -EFAULT;
+		ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_TID_INVAL_READ:
-		if (copy_from_user(&tinfo,
-				   (struct hfi11_tid_info __user *)arg,
-				   sizeof(tinfo)))
-			return -EFAULT;
-
-		ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
-		if (ret)
-			break;
-		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
-		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
-				 sizeof(tinfo.tidcnt)))
-			ret = -EFAULT;
+		ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
 		break;
 
 	case HFI1_IOCTL_RECV_CTRL:
-		ret = get_user(uval, (int __user *)arg);
-		if (ret != 0)
-			return -EFAULT;
-		ret = manage_rcvq(uctxt, fd->subctxt, uval);
+		ret = manage_rcvq(uctxt, fd->subctxt, arg);
 		break;
 
 	case HFI1_IOCTL_POLL_TYPE:
-		ret = get_user(uval, (int __user *)arg);
-		if (ret != 0)
+		if (get_user(uval, (int __user *)arg))
 			return -EFAULT;
 		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
 		break;
 
 	case HFI1_IOCTL_ACK_EVENT:
-		ret = get_user(ul_uval, (unsigned long __user *)arg);
-		if (ret != 0)
-			return -EFAULT;
-		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
+		ret = user_event_ack(uctxt, fd->subctxt, arg);
 		break;
 
 	case HFI1_IOCTL_SET_PKEY:
-		ret = get_user(uval16, (u16 __user *)arg);
-		if (ret != 0)
-			return -EFAULT;
-		if (HFI1_CAP_IS_USET(PKEY_CHECK))
-			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
-		else
-			return -EPERM;
+		ret = set_ctxt_pkey(uctxt, arg);
 		break;
 
-	case HFI1_IOCTL_CTXT_RESET: {
-		struct send_context *sc;
-		struct hfi1_devdata *dd;
-
-		if (!uctxt || !uctxt->dd || !uctxt->sc)
-			return -EINVAL;
-
-		/*
-		 * There is no protection here. User level has to
-		 * guarantee that no one will be writing to the send
-		 * context while it is being re-initialized.
-		 * If user level breaks that guarantee, it will break
-		 * it's own context and no one else's.
-		 */
-		dd = uctxt->dd;
-		sc = uctxt->sc;
-		/*
-		 * Wait until the interrupt handler has marked the
-		 * context as halted or frozen. Report error if we time
-		 * out.
-		 */
-		wait_event_interruptible_timeout(
-			sc->halt_wait, (sc->flags & SCF_HALTED),
-			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
-		if (!(sc->flags & SCF_HALTED))
-			return -ENOLCK;
-
-		/*
-		 * If the send context was halted due to a Freeze,
-		 * wait until the device has been "unfrozen" before
-		 * resetting the context.
-		 */
-		if (sc->flags & SCF_FROZEN) {
-			wait_event_interruptible_timeout(
-				dd->event_queue,
-				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
-				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
-			if (dd->flags & HFI1_FROZEN)
-				return -ENOLCK;
-
-			if (dd->flags & HFI1_FORCED_FREEZE)
-				/*
-				 * Don't allow context reset if we are into
-				 * forced freeze
-				 */
-				return -ENODEV;
-
-			sc_disable(sc);
-			ret = sc_enable(sc);
-			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
-		} else {
-			ret = sc_restart(sc);
-		}
-		if (!ret)
-			sc_return_credits(sc);
+	case HFI1_IOCTL_CTXT_RESET:
+		ret = ctxt_reset(uctxt);
 		break;
-	}
 
 	case HFI1_IOCTL_GET_VERS:
 		uval = HFI1_USER_SWVERSION;
@@ -595,9 +479,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
 		 * Use the page where this context's flags are. User level
 		 * knows where it's own bitmap is within the page.
 		 */
-		memaddr = (unsigned long)(dd->events +
-				((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-				 HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
+		memaddr = (unsigned long)
+			(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
 		memlen = PAGE_SIZE;
 		/*
 		 * v3.7 removes VM_RESERVED but the effect is kept by
@@ -779,8 +662,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
 	 * Clear any left over, unhandled events so the next process that
 	 * gets this context doesn't get confused.
 	 */
-	ev = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
+	ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
 	*ev = 0;
 
 	spin_lock_irqsave(&dd->uctxt_lock, flags);
@@ -891,20 +773,30 @@ static int complete_subctxt(struct hfi1_filedata *fd)
 	return ret;
 }
 
-static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
+static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 {
 	int ret;
 	unsigned int swmajor, swminor;
 	struct hfi1_ctxtdata *uctxt = NULL;
+	struct hfi1_user_info uinfo;
 
-	swmajor = uinfo->userversion >> 16;
+	if (fd->uctxt)
+		return -EINVAL;
+
+	if (sizeof(uinfo) != len)
+		return -EINVAL;
+
+	if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
+		return -EFAULT;
+
+	swmajor = uinfo.userversion >> 16;
 	if (swmajor != HFI1_USER_SWMAJOR)
 		return -ENODEV;
 
-	if (uinfo->subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
+	if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
 		return -EINVAL;
 
-	swminor = uinfo->userversion & 0xffff;
+	swminor = uinfo.userversion & 0xffff;
 
 	/*
 	 * Acquire the mutex to protect against multiple creations of what
@@ -915,14 +807,14 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
 	 * Get a sub context if available  (fd->uctxt will be set).
 	 * ret < 0 error, 0 no context, 1 sub-context found
 	 */
-	ret = find_sub_ctxt(fd, uinfo);
+	ret = find_sub_ctxt(fd, &uinfo);
 
 	/*
 	 * Allocate a base context if context sharing is not required or a
 	 * sub context wasn't found.
 	 */
 	if (!ret)
-		ret = allocate_ctxt(fd, fd->dd, uinfo, &uctxt);
+		ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);
 
 	mutex_unlock(&hfi1_mutex);
 
@@ -1237,12 +1129,13 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
 	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
 }
 
-static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
-			 __u32 len)
+static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 {
 	struct hfi1_ctxt_info cinfo;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
-	int ret = 0;
+
+	if (sizeof(cinfo) != len)
+		return -EINVAL;
 
 	memset(&cinfo, 0, sizeof(cinfo));
 	cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
@@ -1272,10 +1165,10 @@ static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
 
 	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
-	if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
-		ret = -EFAULT;
+	if (copy_to_user((void __user *)arg, &cinfo, len))
+		return -EFAULT;
 
-	return ret;
+	return 0;
 }
 
 static int init_user_ctxt(struct hfi1_filedata *fd,
@@ -1341,18 +1234,18 @@ setup_failed:
 	return ret;
 }
 
-static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
-			 __u32 len)
+static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 {
 	struct hfi1_base_info binfo;
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	struct hfi1_devdata *dd = uctxt->dd;
-	ssize_t sz;
 	unsigned offset;
-	int ret = 0;
 
 	trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);
 
+	if (sizeof(binfo) != len)
+		return -EINVAL;
+
 	memset(&binfo, 0, sizeof(binfo));
 	binfo.hw_version = dd->revision;
 	binfo.sw_version = HFI1_KERN_SWVERSION;
@@ -1382,39 +1275,152 @@ static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
 					       fd->subctxt,
 					       uctxt->egrbufs.rcvtids[0].dma);
 	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
-						fd->subctxt, 0);
+						  fd->subctxt, 0);
 	/*
 	 * user regs are at
 	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
 	 */
 	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
-					    fd->subctxt, 0);
-	offset = offset_in_page((((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-		    HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
-		  sizeof(*dd->events));
+					     fd->subctxt, 0);
+	offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
+				sizeof(*dd->events));
 	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
-					      fd->subctxt,
-					      offset);
+					       fd->subctxt,
+					       offset);
 	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
-					      fd->subctxt,
-					      dd->status);
+					       fd->subctxt,
+					       dd->status);
 	if (HFI1_CAP_IS_USET(DMA_RTAIL))
 		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
-						       fd->subctxt, 0);
+							fd->subctxt, 0);
 	if (uctxt->subctxt_cnt) {
 		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
-							uctxt->ctxt,
-							fd->subctxt, 0);
+							 uctxt->ctxt,
+							 fd->subctxt, 0);
 		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
-							 uctxt->ctxt,
-							 fd->subctxt, 0);
+							  uctxt->ctxt,
+							  fd->subctxt, 0);
 		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
-							uctxt->ctxt,
-							fd->subctxt, 0);
+							  uctxt->ctxt,
+							  fd->subctxt, 0);
 	}
-	sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
-	if (copy_to_user(ubase, &binfo, sz))
+
+	if (copy_to_user((void __user *)arg, &binfo, len))
 		return -EFAULT;
+
 	return 0;
 }
 
+/**
+ * user_exp_rcv_setup - Set up the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argumnent for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_setup.
+ *
+ */
+static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
+			      u32 len)
+{
+	int ret;
+	unsigned long addr;
+	struct hfi1_tid_info tinfo;
+
+	if (sizeof(tinfo) != len)
+		return -EINVAL;
+
+	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+		return -EFAULT;
+
+	ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
+	if (!ret) {
+		/*
+		 * Copy the number of tidlist entries we used
+		 * and the length of the buffer we registered.
+		 */
+		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+				 sizeof(tinfo.tidcnt)))
+			return -EFAULT;
+
+		addr = arg + offsetof(struct hfi1_tid_info, length);
+		if (copy_to_user((void __user *)addr, &tinfo.length,
+				 sizeof(tinfo.length)))
+			ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+/**
+ * user_exp_rcv_clear - Clear the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argumnent for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * The hfi1_user_exp_rcv_clear() can be called from the error path.  Because
+ * of this, we need to use this wrapper to copy the user space information
+ * before doing the clear.
+ */
+static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
+			      u32 len)
+{
+	int ret;
+	unsigned long addr;
+	struct hfi1_tid_info tinfo;
+
+	if (sizeof(tinfo) != len)
+		return -EINVAL;
+
+	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+		return -EFAULT;
+
+	ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
+	if (!ret) {
+		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+				 sizeof(tinfo.tidcnt)))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+/**
+ * user_exp_rcv_invalid - Invalidate the given tid rcv list
+ * @fd: file data of the current driver instance
+ * @arg: ioctl argumnent for user space information
+ * @len: length of data structure associated with ioctl command
+ *
+ * Wrapper to validate ioctl information before doing _rcv_invalid.
+ *
+ */
+static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
+				u32 len)
+{
+	int ret;
+	unsigned long addr;
+	struct hfi1_tid_info tinfo;
+
+	if (sizeof(tinfo) != len)
+		return -EINVAL;
+
+	if (!fd->invalid_tids)
+		return -EINVAL;
+
+	if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
+		return -EFAULT;
+
+	ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
+	if (ret)
+		return ret;
+
+	addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
+	if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
+			 sizeof(tinfo.tidcnt)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
@@ -1482,14 +1488,13 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
 	     ctxt++) {
 		uctxt = hfi1_rcd_get_by_index(dd, ctxt);
 		if (uctxt) {
-			unsigned long *evs = dd->events +
-				(uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-				HFI1_MAX_SHARED_CTXTS;
+			unsigned long *evs;
 			int i;
 			/*
 			 * subctxt_cnt is 0 if not shared, so do base
 			 * separately, first, then remaining subctxt, if any
 			 */
+			evs = dd->events + uctxt_offset(uctxt);
 			set_bit(evtbit, evs);
 			for (i = 1; i < uctxt->subctxt_cnt; i++)
 				set_bit(evtbit, evs + i);
@@ -1511,13 +1516,18 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
  * re-init the software copy of the head register
  */
 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
-		       int start_stop)
+		       unsigned long arg)
 {
 	struct hfi1_devdata *dd = uctxt->dd;
 	unsigned int rcvctrl_op;
+	int start_stop;
 
 	if (subctxt)
-		goto bail;
+		return 0;
+
+	if (get_user(start_stop, (int __user *)arg))
+		return -EFAULT;
+
 	/* atomically clear receive enable ctxt. */
 	if (start_stop) {
 		/*
@@ -1536,7 +1546,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
 	}
 	hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
 	/* always; new head should be equal to new tail; see above */
-bail:
+
 	return 0;
 }
 
@@ -1546,17 +1556,20 @@ bail:
  * set, if desired, and checks again in future.
 */
 static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
-			  unsigned long events)
+			  unsigned long arg)
 {
 	int i;
 	struct hfi1_devdata *dd = uctxt->dd;
 	unsigned long *evs;
+	unsigned long events;
 
 	if (!dd->events)
 		return 0;
 
-	evs = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
-			    HFI1_MAX_SHARED_CTXTS) + subctxt;
+	if (get_user(events, (unsigned long __user *)arg))
+		return -EFAULT;
+
+	evs = dd->events + uctxt_offset(uctxt) + subctxt;
 
 	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
 		if (!test_bit(i, &events))
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
|
||||
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
|
||||
{
|
||||
int ret = -ENOENT, i, intable = 0;
|
||||
int i;
|
||||
struct hfi1_pportdata *ppd = uctxt->ppd;
|
||||
struct hfi1_devdata *dd = uctxt->dd;
|
||||
u16 pkey;
|
||||
|
||||
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
if (!HFI1_CAP_IS_USET(PKEY_CHECK))
|
||||
return -EPERM;
|
||||
|
||||
if (get_user(pkey, (u16 __user *)arg))
|
||||
return -EFAULT;
|
||||
|
||||
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
|
||||
if (pkey == ppd->pkeys[i]) {
|
||||
intable = 1;
|
||||
break;
|
||||
}
|
||||
if (pkey == ppd->pkeys[i])
|
||||
return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/**
|
||||
* ctxt_reset - Reset the user context
|
||||
* @uctxt: valid user context
|
||||
*/
|
||||
static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
|
||||
{
|
||||
struct send_context *sc;
|
||||
struct hfi1_devdata *dd;
|
||||
int ret = 0;
|
||||
|
||||
if (!uctxt || !uctxt->dd || !uctxt->sc)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* There is no protection here. User level has to guarantee that
|
||||
* no one will be writing to the send context while it is being
|
||||
* re-initialized. If user level breaks that guarantee, it will
|
||||
* break it's own context and no one else's.
|
||||
*/
|
||||
dd = uctxt->dd;
|
||||
sc = uctxt->sc;
|
||||
|
||||
/*
|
||||
* Wait until the interrupt handler has marked the context as
|
||||
* halted or frozen. Report error if we time out.
|
||||
*/
|
||||
wait_event_interruptible_timeout(
|
||||
sc->halt_wait, (sc->flags & SCF_HALTED),
|
||||
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
|
||||
if (!(sc->flags & SCF_HALTED))
|
||||
return -ENOLCK;
|
||||
|
||||
/*
|
||||
* If the send context was halted due to a Freeze, wait until the
|
||||
* device has been "unfrozen" before resetting the context.
|
||||
*/
|
||||
if (sc->flags & SCF_FROZEN) {
|
||||
wait_event_interruptible_timeout(
|
||||
dd->event_queue,
|
||||
!(READ_ONCE(dd->flags) & HFI1_FROZEN),
|
||||
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
|
||||
if (dd->flags & HFI1_FROZEN)
|
||||
return -ENOLCK;
|
||||
|
||||
if (dd->flags & HFI1_FORCED_FREEZE)
|
||||
/*
|
||||
* Don't allow context reset if we are into
|
||||
* forced freeze
|
||||
*/
|
||||
return -ENODEV;
|
||||
|
||||
sc_disable(sc);
|
||||
ret = sc_enable(sc);
|
||||
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
|
||||
} else {
|
||||
ret = sc_restart(sc);
|
||||
}
|
||||
if (!ret)
|
||||
sc_return_credits(sc);
|
||||
|
||||
if (intable)
|
||||
ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
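The ioctl rework above moves per-command copy-in/copy-out into small handlers that validate _IOC_SIZE(cmd) against the expected structure before touching user memory. A minimal sketch of that validation shape, assuming a hypothetical example_ioctl_args structure and handler (only _IOC_SIZE(), copy_from_user(), and the errno values are the real kernel interfaces):

#include <linux/uaccess.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/errno.h>

/* hypothetical ioctl payload for the sketch */
struct example_ioctl_args {
	__u32 flags;
	__u64 buf;
};

static int example_handler(unsigned long arg, u32 len)
{
	struct example_ioctl_args args;

	/* reject callers built against a different struct layout */
	if (sizeof(args) != len)
		return -EINVAL;

	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
		return -EFAULT;

	/* ... act on the validated snapshot of args ... */
	return 0;
}

/* at the dispatch site, len comes from the command word itself: */
/*	ret = example_handler(arg, _IOC_SIZE(cmd)); */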
@@ -535,6 +535,8 @@ struct rvt_sge_state;
 #define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
 #define HLS_DOWN ~(HLS_UP)
 
+#define HLS_DEFAULT HLS_DN_POLL
+
 /* use this MTU size if none other is given */
 #define HFI1_DEFAULT_ACTIVE_MTU 10240
 /* use this MTU size as the default maximum */
@@ -616,7 +618,6 @@ struct hfi1_msix_entry {
 	enum irq_type type;
 	int irq;
 	void *arg;
-	char name[MAX_NAME_SIZE];
 	cpumask_t mask;
 	struct irq_affinity_notify notify;
 };
@@ -1109,8 +1110,7 @@ struct hfi1_devdata {
 	u16 rcvegrbufsize_shift;
 	/* both sides of the PCIe link are gen3 capable */
 	u8 link_gen3_capable;
-	/* default link down value (poll/sleep) */
-	u8 link_default;
+	u8 dc_shutdown;
 	/* localbus width (1, 2,4,8,16,32) from config space */
 	u32 lbus_width;
 	/* localbus speed in MHz */
@@ -1183,7 +1183,6 @@ struct hfi1_devdata {
 
 	/* INTx information */
 	u32 requested_intx_irq;		/* did we request one? */
-	char intx_name[MAX_NAME_SIZE];	/* INTx name */
 
 	/* general interrupt: mask of handled interrupts */
 	u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1295,7 +1294,6 @@ struct hfi1_devdata {
 	u8 oui1;
 	u8 oui2;
 	u8 oui3;
-	u8 dc_shutdown;
 
 	/* Timer and counter used to detect RcvBufOvflCnt changes */
 	struct timer_list rcverr_timer;
@@ -1373,8 +1371,12 @@ struct hfi1_filedata {
 extern struct list_head hfi1_dev_list;
 extern spinlock_t hfi1_devs_lock;
 struct hfi1_devdata *hfi1_lookup(int unit);
-extern u32 hfi1_cpulist_count;
-extern unsigned long *hfi1_cpulist;
+
+static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
+{
+	return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
+		HFI1_MAX_SHARED_CTXTS;
+}
 
 int hfi1_init(struct hfi1_devdata *dd, int reinit);
 int hfi1_count_active_units(void);
@@ -1396,6 +1398,8 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
 void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
 int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
 void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+						 u16 ctxt);
 struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
 int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
 int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
@@ -123,8 +123,6 @@ MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user
 static inline u64 encode_rcv_header_entry_size(u16 size);
 
 static struct idr hfi1_unit_table;
-u32 hfi1_cpulist_count;
-unsigned long *hfi1_cpulist;
 
 static int hfi1_create_kctxt(struct hfi1_devdata *dd,
			     struct hfi1_pportdata *ppd)
@@ -285,6 +283,27 @@ static int allocate_rcd_index(struct hfi1_devdata *dd,
 	return 0;
 }
 
+/**
+ * hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
+ * array
+ * @dd: pointer to a valid devdata structure
+ * @ctxt: the index of an possilbe rcd
+ *
+ * This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
+ * ctxt index is valid.
+ *
+ * The caller is responsible for making the _put().
+ *
+ */
+struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
+						 u16 ctxt)
+{
+	if (ctxt < dd->num_rcv_contexts)
+		return hfi1_rcd_get_by_index(dd, ctxt);
+
+	return NULL;
+}
+
 /**
  * hfi1_rcd_get_by_index
  * @dd: pointer to a valid devdata structure
@@ -1272,39 +1291,21 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
 	dd->int_counter = alloc_percpu(u64);
 	if (!dd->int_counter) {
 		ret = -ENOMEM;
-		hfi1_early_err(&pdev->dev,
-			       "Could not allocate per-cpu int_counter\n");
 		goto bail;
 	}
 
 	dd->rcv_limit = alloc_percpu(u64);
 	if (!dd->rcv_limit) {
 		ret = -ENOMEM;
-		hfi1_early_err(&pdev->dev,
-			       "Could not allocate per-cpu rcv_limit\n");
 		goto bail;
 	}
 
 	dd->send_schedule = alloc_percpu(u64);
 	if (!dd->send_schedule) {
 		ret = -ENOMEM;
-		hfi1_early_err(&pdev->dev,
-			       "Could not allocate per-cpu int_counter\n");
 		goto bail;
 	}
 
-	if (!hfi1_cpulist_count) {
-		u32 count = num_online_cpus();
-
-		hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
-				       GFP_KERNEL);
-		if (hfi1_cpulist)
-			hfi1_cpulist_count = count;
-		else
-			hfi1_early_err(
-			&pdev->dev,
-			"Could not alloc cpulist info, cpu affinity might be wrong\n");
-	}
 	kobject_init(&dd->kobj, &hfi1_devdata_type);
 	return dd;
 
@@ -1477,8 +1478,6 @@ static void __exit hfi1_mod_cleanup(void)
 	node_affinity_destroy();
 	hfi1_wss_exit();
 	hfi1_dbg_exit();
-	hfi1_cpulist_count = 0;
-	kfree(hfi1_cpulist);
 
 	idr_destroy(&hfi1_unit_table);
 	dispose_firmware();	/* asymmetric with obtain_firmware() */
@@ -1392,6 +1392,13 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 		return ret;
 
 	idle_cnt = ns_to_cclock(dd, idle_cnt);
+	if (idle_cnt)
+		dd->default_desc1 =
+			SDMA_DESC1_HEAD_TO_HOST_FLAG;
+	else
+		dd->default_desc1 =
+			SDMA_DESC1_INT_REQ_FLAG;
+
 	if (!sdma_desct_intr)
 		sdma_desct_intr = SDMA_DESC_INTR;
 
@@ -1436,13 +1443,6 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
 		sde->tail_csr =
 			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
 
-		if (idle_cnt)
-			dd->default_desc1 =
-				SDMA_DESC1_HEAD_TO_HOST_FLAG;
-		else
-			dd->default_desc1 =
-				SDMA_DESC1_INT_REQ_FLAG;
-
 		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
			     (unsigned long)sde);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
  *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
@@ -91,12 +91,17 @@ u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
 	return __get_16b_hdr_len(&opa_hdr->opah);
 }
 
-const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
+const char *hfi1_trace_get_packet_l4_str(u8 l4)
 {
-	if (packet->etype != RHF_RCV_TYPE_BYPASS)
-		return "IB";
+	if (l4)
+		return "16B";
+	else
+		return "9B";
+}
 
-	switch (hfi1_16B_get_l2(packet->hdr)) {
+const char *hfi1_trace_get_packet_l2_str(u8 l2)
+{
+	switch (l2) {
 	case 0:
 		return "0";
 	case 1:
@@ -109,14 +114,6 @@ const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
 	return "";
 }
 
-const char *hfi1_trace_get_packet_type_str(u8 l4)
-{
-	if (l4)
-		return "16B";
-	else
-		return "9B";
-}
-
 #define IMM_PRN  "imm:%d"
 #define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
 #define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
@@ -44,6 +44,16 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
+
+#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
+#define show_packettype(etype)                  \
+__print_symbolic(etype,                         \
+	packettype_name(EXPECTED),              \
+	packettype_name(EAGER),                 \
+	packettype_name(IB),                    \
+	packettype_name(ERROR),                 \
+	packettype_name(BYPASS))
+
 #include "trace_dbg.h"
 #include "trace_misc.h"
 #include "trace_ctxts.h"
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
@@ -99,8 +99,7 @@ u8 ibhdr_exhdr_len(struct ib_header *hdr);
 const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
 u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
 u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
-const char *hfi1_trace_get_packet_type_str(u8 l4);
-const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet);
+const char *hfi1_trace_get_packet_l4_str(u8 l4);
 void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
			     u8 *ack, u8 *becn, u8 *fecn, u8 *mig,
			     u8 *se, u8 *pad, u8 *opcode, u8 *tver,
@@ -129,6 +128,8 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
			       u8 se, u8 pad, u8 opcode, const char *opname,
			       u8 tver, u16 pkey, u32 psn, u32 qpn);
 
+const char *hfi1_trace_get_packet_l2_str(u8 l2);
+
 #define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
 
 #define lrh_name(lrh) { HFI1_##lrh, #lrh }
@@ -136,8 +137,6 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
 __print_symbolic(lrh,                    \
	lrh_name(LRH_BTH),               \
	lrh_name(LRH_GRH))
-#define PKT_ENTRY(pkt) __string(ptype, hfi1_trace_get_packet_str(packet))
-#define PKT_ASSIGN(pkt) __assign_str(ptype, hfi1_trace_get_packet_str(packet))
 
 DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
		    TP_PROTO(struct hfi1_devdata *dd,
@@ -146,12 +145,12 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
		    TP_ARGS(dd, packet, sc5),
		    TP_STRUCT__entry(
			DD_DEV_ENTRY(dd)
-			PKT_ENTRY(packet)
-			__field(bool, bypass)
+			__field(u8, etype)
			__field(u8, ack)
			__field(u8, age)
			__field(u8, becn)
			__field(u8, fecn)
+			__field(u8, l2)
			__field(u8, l4)
			__field(u8, lnh)
			__field(u8, lver)
@@ -176,10 +175,10 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
			),
		    TP_fast_assign(
			DD_DEV_ASSIGN(dd);
-			PKT_ASSIGN(packet);
 
-			if (packet->etype == RHF_RCV_TYPE_BYPASS) {
-				__entry->bypass = true;
+			__entry->etype = packet->etype;
+			__entry->l2 = hfi1_16B_get_l2(packet->hdr);
+			if (__entry->etype == RHF_RCV_TYPE_BYPASS) {
				hfi1_trace_parse_16b_hdr(packet->hdr,
							 &__entry->age,
							 &__entry->becn,
@@ -203,7 +202,6 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
							 &__entry->psn,
							 &__entry->qpn);
			} else {
-				__entry->bypass = false;
				hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
							&__entry->lnh,
							&__entry->lver,
@@ -233,9 +231,13 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
			),
		    TP_printk("[%s] (%s) %s %s hlen:%d %s",
			      __get_str(dev),
-			      __get_str(ptype),
+			      __entry->etype != RHF_RCV_TYPE_BYPASS ?
					show_packettype(__entry->etype) :
					hfi1_trace_get_packet_l2_str(
						__entry->l2),
			      hfi1_trace_fmt_lrh(p,
-						 __entry->bypass,
+						 __entry->etype ==
							RHF_RCV_TYPE_BYPASS,
						 __entry->age,
						 __entry->becn,
						 __entry->fecn,
@@ -252,7 +254,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
						 __entry->dlid,
						 __entry->slid),
			      hfi1_trace_fmt_bth(p,
-						 __entry->bypass,
+						 __entry->etype ==
							RHF_RCV_TYPE_BYPASS,
						 __entry->ack,
						 __entry->becn,
						 __entry->fecn,
@@ -284,7 +287,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
		    TP_ARGS(dd, opah, sc5),
		    TP_STRUCT__entry(
			DD_DEV_ENTRY(dd)
-			__field(bool, bypass)
+			__field(u8, hdr_type)
			__field(u8, ack)
			__field(u8, age)
			__field(u8, becn)
@@ -316,8 +319,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
 
			DD_DEV_ASSIGN(dd);
 
-			if (opah->hdr_type)  {
-				__entry->bypass = true;
+			__entry->hdr_type = opah->hdr_type;
+			if (__entry->hdr_type)  {
				hfi1_trace_parse_16b_hdr(&opah->opah,
							 &__entry->age,
							 &__entry->becn,
@@ -345,7 +348,6 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
							 &__entry->psn,
							 &__entry->qpn);
			} else {
-				__entry->bypass = false;
				hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
							&__entry->lnh,
							&__entry->lver,
@@ -378,9 +380,9 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
			),
		    TP_printk("[%s] (%s) %s %s hlen:%d %s",
			      __get_str(dev),
-			      hfi1_trace_get_packet_type_str(__entry->l4),
+			      hfi1_trace_get_packet_l4_str(__entry->l4),
			      hfi1_trace_fmt_lrh(p,
-						 __entry->bypass,
+						 !!__entry->hdr_type,
						 __entry->age,
						 __entry->becn,
						 __entry->fecn,
@@ -397,7 +399,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
						 __entry->dlid,
						 __entry->slid),
			      hfi1_trace_fmt_bth(p,
-						 __entry->bypass,
+						 !!__entry->hdr_type,
						 __entry->ack,
						 __entry->becn,
						 __entry->fecn,
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
@@ -62,15 +62,6 @@ __print_symbolic(type, \
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM hfi1_rx
 
-#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
-#define show_packettype(etype)                  \
-__print_symbolic(etype,                         \
-	packettype_name(EXPECTED),              \
-	packettype_name(EAGER),                 \
-	packettype_name(IB),                    \
-	packettype_name(ERROR),                 \
-	packettype_name(BYPASS))
-
 TRACE_EVENT(hfi1_rcvhdr,
	    TP_PROTO(struct hfi1_devdata *dd,
		     u32 ctxt,
@@ -542,14 +542,10 @@ int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
 {
 	struct hfi1_ctxtdata *uctxt = fd->uctxt;
 	unsigned long *ev = uctxt->dd->events +
-		(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
-		  HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
+		(uctxt_offset(uctxt) + fd->subctxt);
 	u32 *array;
 	int ret = 0;
 
-	if (!fd->invalid_tids)
-		return -EINVAL;
-
 	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
@@ -942,8 +938,7 @@ static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
		 * process in question.
		 */
		ev = uctxt->dd->events +
-			(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
-			  HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
+			(uctxt_offset(uctxt) + fdata->subctxt);
		set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	}
	fdata->invalid_tid_idx++;
@@ -956,10 +956,8 @@ static int pin_sdma_pages(struct user_sdma_request *req,
 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
 
 	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
-	if (!pages) {
-		SDMA_DBG(req, "Failed page array alloc");
+	if (!pages)
 		return -ENOMEM;
-	}
 	memcpy(pages, node->pages, node->npages * sizeof(*pages));
 
 	npages -= node->npages;
@@ -1254,20 +1252,25 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 datalen)
 {
 	u32 ahg[AHG_KDETH_ARRAY_SIZE];
-	int diff = 0;
+	int idx = 0;
 	u8 omfactor; /* KDETH.OM */
 	struct hfi1_user_sdma_pkt_q *pq = req->pq;
 	struct hfi1_pkt_header *hdr = &req->hdr;
 	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
 	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
+	size_t array_size = ARRAY_SIZE(ahg);
 
 	if (PBC2LRH(pbclen) != lrhlen) {
 		/* PBC.PbcLengthDWs */
-		AHG_HEADER_SET(ahg, diff, 0, 0, 12,
-			       cpu_to_le16(LRH2PBC(lrhlen)));
+		idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
+				     (__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
+		if (idx < 0)
+			return idx;
 		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
-		AHG_HEADER_SET(ahg, diff, 3, 0, 16,
-			       cpu_to_be16(lrhlen >> 2));
+		idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
+				     (__force u16)cpu_to_be16(lrhlen >> 2));
+		if (idx < 0)
+			return idx;
 	}
 
 	/*
@@ -1278,12 +1281,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
 	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
 		val32 |= 1UL << 31;
-	AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
-	AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
+	idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
+			     (__force u16)cpu_to_be16(val32 >> 16));
+	if (idx < 0)
+		return idx;
+	idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
+			     (__force u16)cpu_to_be16(val32 & 0xffff));
+	if (idx < 0)
+		return idx;
 	/* KDETH.Offset */
-	AHG_HEADER_SET(ahg, diff, 15, 0, 16,
-		       cpu_to_le16(req->koffset & 0xffff));
-	AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
+	idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
+			     (__force u16)cpu_to_le16(req->koffset & 0xffff));
+	if (idx < 0)
+		return idx;
+	idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
+			     (__force u16)cpu_to_le16(req->koffset >> 16));
+	if (idx < 0)
+		return idx;
 	if (req_opcode(req->info.ctrl) == EXPECTED) {
		__le16 val;
 
@@ -1310,10 +1324,13 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
				 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
				 KDETH_OM_SMALL_SHIFT;
		/* KDETH.OM and KDETH.OFFSET (TID) */
-		AHG_HEADER_SET(ahg, diff, 7, 0, 16,
-			       ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
+		idx = ahg_header_set(
+				ahg, idx, array_size, 7, 0, 16,
+				((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
				((req->tidoffset >> omfactor)
-				& 0x7fff)));
+				 & 0x7fff)));
+		if (idx < 0)
+			return idx;
		/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
				  (EXP_TID_GET(tidval, IDX) & 0x3ff));
@@ -1330,21 +1347,22 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
					     AHG_KDETH_INTR_SHIFT));
		}
 
-		AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
+		idx = ahg_header_set(ahg, idx, array_size,
+				     7, 16, 14, (__force u16)val);
+		if (idx < 0)
+			return idx;
	}
-	if (diff < 0)
-		return diff;
 
	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
-					req->ahg_idx, ahg, diff, tidval);
+					req->ahg_idx, ahg, idx, tidval);
	sdma_txinit_ahg(&tx->txreq,
			SDMA_TXREQ_F_USE_AHG,
-			datalen, req->ahg_idx, diff,
+			datalen, req->ahg_idx, idx,
			ahg, sizeof(req->hdr),
			user_sdma_txreq_cb);
 
-	return diff;
+	return idx;
 }
 
 /*
@@ -80,15 +80,26 @@
 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
 
-#define AHG_HEADER_SET(arr, idx, dw, bit, width, value)			\
-do {									\
-	if ((idx) < ARRAY_SIZE((arr)))					\
-		(arr)[(idx++)] = sdma_build_ahg_descriptor(		\
-			(__force u16)(value), (dw), (bit),		\
-			(width));					\
-	else								\
-		return -ERANGE;						\
-} while (0)
+/**
+ * Build an SDMA AHG header update descriptor and save it to an array.
+ * @arr - Array to save the descriptor to.
+ * @idx - Index of the array at which the descriptor will be saved.
+ * @array_size - Size of the array arr.
+ * @dw - Update index into the header in DWs.
+ * @bit - Start bit.
+ * @width - Field width.
+ * @value - 16 bits of immediate data to write into the field.
+ * Returns -ERANGE if idx is invalid.  If successful, returns the next index
+ * (idx + 1) of the array to be used for the next descriptor.
+ */
+static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
+				 u8 dw, u8 bit, u8 width, u16 value)
+{
+	if ((size_t)idx >= array_size)
+		return -ERANGE;
+	arr[idx++] = sdma_build_ahg_descriptor(value, dw, bit, width);
+	return idx;
+}
 
 /* Tx request flag bits */
 #define TXREQ_FLAGS_REQ_ACK   BIT(0)      /* Set the ACK bit in the header */
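The new ahg_header_set() above replaces a macro that hid a "return -ERANGE" inside its do/while body, so overflow handling was invisible at the call site; callers now chain the returned index and propagate errors explicitly, as set_txreq_header_ahg() does in the preceding hunks. A sketch of that chaining, with a hypothetical build_descriptor() and bit packing standing in for sdma_build_ahg_descriptor():

#include <linux/errno.h>
#include <linux/types.h>

/* stand-in for sdma_build_ahg_descriptor(); the packing is assumed */
static inline u32 build_descriptor(u16 value, u8 dw, u8 bit, u8 width)
{
	return ((u32)dw << 26) | ((u32)bit << 21) | ((u32)width << 16) | value;
}

static inline int header_set(u32 *arr, int idx, size_t array_size,
			     u8 dw, u8 bit, u8 width, u16 value)
{
	if ((size_t)idx >= array_size)
		return -ERANGE;
	arr[idx++] = build_descriptor(value, dw, bit, width);
	return idx;		/* next free slot */
}

static int fill_updates(u32 *ahg, size_t array_size)
{
	int idx = 0;

	/* each call feeds the returned index into the next one */
	idx = header_set(ahg, idx, array_size, 0, 0, 12, 0x123);
	if (idx < 0)
		return idx;	/* propagate -ERANGE; no hidden return */
	idx = header_set(ahg, idx, array_size, 3, 0, 16, 0x456);
	if (idx < 0)
		return idx;
	return idx;		/* number of descriptors written */
}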
@@ -443,14 +443,12 @@ struct qib_irq_notify;
 #endif
 
 struct qib_msix_entry {
-	int irq;
 	void *arg;
 #ifdef CONFIG_INFINIBAND_QIB_DCA
 	int dca;
 	int rcv;
 	struct qib_irq_notify *notifier;
 #endif
-	char name[MAX_NAME_SIZE];
 	cpumask_var_t mask;
 };
 
@@ -1434,10 +1432,8 @@ int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
		    const struct pci_device_id *);
 void qib_pcie_ddcleanup(struct qib_devdata *);
 int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent);
-int qib_reinit_intr(struct qib_devdata *);
-void qib_enable_intx(struct qib_devdata *dd);
-void qib_nomsi(struct qib_devdata *);
-void qib_nomsix(struct qib_devdata *);
+void qib_free_irq(struct qib_devdata *dd);
+int qib_reinit_intr(struct qib_devdata *dd);
 void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *);
 void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8);
 /* interrupts for device */
@@ -67,7 +67,6 @@ struct qib_chip_specific {
 	u32 lastbuf_for_pio;
 	u32 updthresh; /* current AvailUpdThld */
 	u32 updthresh_dflt; /* default AvailUpdThld */
-	int irq;
 	u8 presets_needed;
 	u8 relock_timer_active;
 	char emsgbuf[128];
@@ -245,7 +245,6 @@ struct qib_chip_specific {
 	u64 iblnkerrsnap;
 	u64 ibcctrl; /* shadow for kr_ibcctrl */
 	u32 lastlinkrecov; /* link recovery issue */
-	int irq;
 	u32 cntrnamelen;
 	u32 portcntrnamelen;
 	u32 ncntrs;
@ -1485,15 +1484,6 @@ static void qib_6120_setup_setextled(struct qib_pportdata *ppd, u32 on)
|
||||
spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
|
||||
}
|
||||
|
||||
static void qib_6120_free_irq(struct qib_devdata *dd)
|
||||
{
|
||||
if (dd->cspec->irq) {
|
||||
free_irq(dd->cspec->irq, dd);
|
||||
dd->cspec->irq = 0;
|
||||
}
|
||||
qib_nomsi(dd);
|
||||
}
|
||||
|
||||
/**
|
||||
* qib_6120_setup_cleanup - clean up any per-chip chip-specific stuff
|
||||
* @dd: the qlogic_ib device
|
||||
@ -1502,7 +1492,7 @@ static void qib_6120_free_irq(struct qib_devdata *dd)
|
||||
*/
|
||||
static void qib_6120_setup_cleanup(struct qib_devdata *dd)
|
||||
{
|
||||
qib_6120_free_irq(dd);
|
||||
qib_free_irq(dd);
|
||||
kfree(dd->cspec->cntrs);
|
||||
kfree(dd->cspec->portcntrs);
|
||||
if (dd->cspec->dummy_hdrq) {
|
||||
@ -1706,6 +1696,8 @@ bail:
 */
static void qib_setup_6120_interrupt(struct qib_devdata *dd)
{
        int ret;

        /*
         * If the chip supports added error indication via GPIO pins,
         * enable interrupts on those bits so the interrupt routine
@ -1719,19 +1711,12 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
                qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
        }

        if (!dd->cspec->irq)
        ret = pci_request_irq(dd->pcidev, 0, qib_6120intr, NULL, dd,
                              QIB_DRV_NAME);
        if (ret)
                qib_dev_err(dd,
                        "irq is 0, BIOS error? Interrupts won't work\n");
        else {
                int ret;

                ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
                                  QIB_DRV_NAME, dd);
                if (ret)
                        qib_dev_err(dd,
                                "Couldn't setup interrupt (irq=%d): %d\n",
                                dd->cspec->irq, ret);
        }
                            "Couldn't setup interrupt (irq=%d): %d\n",
                            pci_irq_vector(dd->pcidev, 0), ret);
}

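The rework above swaps a bare request_irq() plus a hand-rolled name buffer for the managed PCI helper, which formats and stores the name itself and frees it with the device. A sketch of the pattern under stated assumptions (mydrv, struct mydev and mydev_intr are hypothetical):

        #include <linux/interrupt.h>
        #include <linux/pci.h>

        struct mydev {
                int unit;       /* hypothetical per-device index */
        };

        static irqreturn_t mydev_intr(int irq, void *data)
        {
                return IRQ_HANDLED;     /* a real handler would poll the device */
        }

        static int mydev_request(struct pci_dev *pdev, struct mydev *md)
        {
                /* vector 0; the printf-style name is allocated by the PCI core */
                return pci_request_irq(pdev, 0, mydev_intr, NULL, md,
                                       "mydrv_%d", md->unit);
        }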
/**
@ -3490,7 +3475,7 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
        dd->f_bringup_serdes = qib_6120_bringup_serdes;
        dd->f_cleanup = qib_6120_setup_cleanup;
        dd->f_clear_tids = qib_6120_clear_tids;
        dd->f_free_irq = qib_6120_free_irq;
        dd->f_free_irq = qib_free_irq;
        dd->f_get_base_info = qib_6120_get_base_info;
        dd->f_get_msgheader = qib_6120_get_msgheader;
        dd->f_getsendbuf = qib_6120_getsendbuf;
@ -3559,8 +3544,6 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
        if (qib_pcie_params(dd, 8, NULL))
                qib_dev_err(dd,
                        "Failed to setup PCIe or interrupts; continuing anyway\n");
        dd->cspec->irq = pdev->irq; /* save IRQ */

        /* clear diagctrl register, in case diags were running and crashed */
        qib_write_kreg(dd, kr_hwdiagctrl, 0);

@ -1780,15 +1780,6 @@ static void qib_setup_7220_setextled(struct qib_pportdata *ppd, u32 on)
        qib_write_kreg(dd, kr_rcvpktledcnt, ledblink);
}

static void qib_7220_free_irq(struct qib_devdata *dd)
{
        if (dd->cspec->irq) {
                free_irq(dd->cspec->irq, dd);
                dd->cspec->irq = 0;
        }
        qib_nomsi(dd);
}

/*
 * qib_setup_7220_cleanup - clean up any per-chip chip-specific stuff
 * @dd: the qlogic_ib device
@ -1798,7 +1789,7 @@ static void qib_7220_free_irq(struct qib_devdata *dd)
 */
static void qib_setup_7220_cleanup(struct qib_devdata *dd)
{
        qib_7220_free_irq(dd);
        qib_free_irq(dd);
        kfree(dd->cspec->cntrs);
        kfree(dd->cspec->portcntrs);
}
@ -2026,20 +2017,14 @@ bail:
 */
static void qib_setup_7220_interrupt(struct qib_devdata *dd)
{
        if (!dd->cspec->irq)
                qib_dev_err(dd,
                        "irq is 0, BIOS error? Interrupts won't work\n");
        else {
                int ret = request_irq(dd->cspec->irq, qib_7220intr,
                                      dd->msi_lo ? 0 : IRQF_SHARED,
                                      QIB_DRV_NAME, dd);
        int ret;

                if (ret)
                        qib_dev_err(dd,
                                "Couldn't setup %s interrupt (irq=%d): %d\n",
                                dd->msi_lo ? "MSI" : "INTx",
                                dd->cspec->irq, ret);
        }
        ret = pci_request_irq(dd->pcidev, 0, qib_7220intr, NULL, dd,
                              QIB_DRV_NAME);
        if (ret)
                qib_dev_err(dd, "Couldn't setup %s interrupt (irq=%d): %d\n",
                            dd->pcidev->msi_enabled ? "MSI" : "INTx",
                            pci_irq_vector(dd->pcidev, 0), ret);
}

/**
@ -3302,16 +3287,12 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd)
                return 0;

        qib_devinfo(dd->pcidev,
                "MSI interrupt not detected, trying INTx interrupts\n");
        qib_7220_free_irq(dd);
        qib_enable_intx(dd);
        /*
         * Some newer kernels require free_irq before disable_msi,
         * and irq can be changed during disable and INTx enable
         * and we need to therefore use the pcidev->irq value,
         * not our saved MSI value.
         */
        dd->cspec->irq = dd->pcidev->irq;
                    "MSI interrupt not detected, trying INTx interrupts\n");

        qib_free_irq(dd);
        dd->msi_lo = 0;
        if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
                qib_dev_err(dd, "Failed to enable INTx\n");
        qib_setup_7220_interrupt(dd);
        return 1;
}
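The fallback path above is the whole INTx story under the new API: release the MSI vector, then re-allocate a single legacy vector and re-request the handler. A condensed, hypothetical sketch (pdev and dev_id stand in for the driver's state):

        #include <linux/pci.h>

        static void fall_back_to_intx(struct pci_dev *pdev, void *dev_id)
        {
                /* drop the MSI vector and its handler... */
                pci_free_irq(pdev, 0, dev_id);
                pci_free_irq_vectors(pdev);
                /* ...then allocate a single legacy INTx vector */
                if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY) < 0)
                        dev_err(&pdev->dev, "Failed to enable INTx\n");
        }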
@ -4535,7 +4516,7 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
        dd->f_bringup_serdes = qib_7220_bringup_serdes;
        dd->f_cleanup = qib_setup_7220_cleanup;
        dd->f_clear_tids = qib_7220_clear_tids;
        dd->f_free_irq = qib_7220_free_irq;
        dd->f_free_irq = qib_free_irq;
        dd->f_get_base_info = qib_7220_get_base_info;
        dd->f_get_msgheader = qib_7220_get_msgheader;
        dd->f_getsendbuf = qib_7220_getsendbuf;
@ -4618,9 +4599,6 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
                qib_dev_err(dd,
                        "Failed to setup PCIe or interrupts; continuing anyway\n");

        /* save IRQ for possible later use */
        dd->cspec->irq = pdev->irq;

        if (qib_read_kreg64(dd, kr_hwerrstatus) &
            QLOGIC_IB_HWE_SERDESPLLFAILED)
                qib_write_kreg(dd, kr_hwerrclear,

@ -553,7 +553,6 @@ struct qib_chip_specific {
        u32 updthresh; /* current AvailUpdThld */
        u32 updthresh_dflt; /* default AvailUpdThld */
        u32 r1;
        int irq;
        u32 num_msix_entries;
        u32 sdmabufcnt;
        u32 lastbuf_for_pio;
@ -756,10 +755,8 @@ static void check_7322_rxe_status(struct qib_pportdata *);
static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
#ifdef CONFIG_INFINIBAND_QIB_DCA
static void qib_setup_dca(struct qib_devdata *dd);
static void setup_dca_notifier(struct qib_devdata *dd,
                               struct qib_msix_entry *m);
static void reset_dca_notifier(struct qib_devdata *dd,
                               struct qib_msix_entry *m);
static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
#endif

/**
@ -2778,7 +2775,7 @@ static void qib_setup_dca(struct qib_devdata *dd)
                qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
                               cspec->dca_rcvhdr_ctrl[i]);
        for (i = 0; i < cspec->num_msix_entries; i++)
                setup_dca_notifier(dd, &cspec->msix_entries[i]);
                setup_dca_notifier(dd, i);
}

static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
@ -2820,49 +2817,41 @@ static void qib_irq_notifier_release(struct kref *ref)
}
#endif

/*
 * Disable MSIx interrupt if enabled, call generic MSIx code
 * to cleanup, and clear pending MSIx interrupts.
 * Used for fallback to INTx, after reset, and when MSIx setup fails.
 */
static void qib_7322_nomsix(struct qib_devdata *dd)
static void qib_7322_free_irq(struct qib_devdata *dd)
{
        u64 intgranted;
        int n;
        int i;

        dd->cspec->main_int_mask = ~0ULL;
        n = dd->cspec->num_msix_entries;
        if (n) {
                int i;

                dd->cspec->num_msix_entries = 0;
                for (i = 0; i < n; i++) {
        for (i = 0; i < dd->cspec->num_msix_entries; i++) {
                /* only free IRQs that were allocated */
                if (dd->cspec->msix_entries[i].arg) {
#ifdef CONFIG_INFINIBAND_QIB_DCA
                        reset_dca_notifier(dd, &dd->cspec->msix_entries[i]);
                        reset_dca_notifier(dd, i);
#endif
                        irq_set_affinity_hint(
                                dd->cspec->msix_entries[i].irq, NULL);
                        irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
                                              NULL);
                        free_cpumask_var(dd->cspec->msix_entries[i].mask);
                        free_irq(dd->cspec->msix_entries[i].irq,
                                 dd->cspec->msix_entries[i].arg);
                        pci_free_irq(dd->pcidev, i,
                                     dd->cspec->msix_entries[i].arg);
                }
                qib_nomsix(dd);
        }

        /* If num_msix_entries was 0, disable the INTx IRQ */
        if (!dd->cspec->num_msix_entries)
                pci_free_irq(dd->pcidev, 0, dd);
        else
                dd->cspec->num_msix_entries = 0;

        pci_free_irq_vectors(dd->pcidev);

        /* make sure no MSIx interrupts are left pending */
        intgranted = qib_read_kreg64(dd, kr_intgranted);
        if (intgranted)
                qib_write_kreg(dd, kr_intgranted, intgranted);
}

static void qib_7322_free_irq(struct qib_devdata *dd)
{
        if (dd->cspec->irq) {
                free_irq(dd->cspec->irq, dd);
                dd->cspec->irq = 0;
        }
        qib_7322_nomsix(dd);
}

static void qib_setup_7322_cleanup(struct qib_devdata *dd)
{
        int i;
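The reworked teardown keeps one invariant: every vector requested with pci_request_irq() is released with pci_free_irq() before the vectors themselves are freed. A hedged sketch of that ordering (struct my_vec, entries and nvec are stand-ins for the driver's bookkeeping):

        #include <linux/pci.h>

        struct my_vec { void *arg; };   /* hypothetical per-vector state */

        static void free_all_vectors(struct pci_dev *pdev,
                                     struct my_vec *entries, int nvec)
        {
                int i;

                /* release per-vector handlers first... */
                for (i = 0; i < nvec; i++)
                        if (entries[i].arg)     /* only vectors actually requested */
                                pci_free_irq(pdev, i, entries[i].arg);
                /* ...then the vector allocation itself */
                pci_free_irq_vectors(pdev);
        }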
@ -3329,22 +3318,20 @@ static irqreturn_t sdma_cleanup_intr(int irq, void *data)

#ifdef CONFIG_INFINIBAND_QIB_DCA

static void reset_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
{
        if (!m->dca)
        if (!dd->cspec->msix_entries[msixnum].dca)
                return;
        qib_devinfo(dd->pcidev,
                "Disabling notifier on HCA %d irq %d\n",
                dd->unit,
                m->irq);
        irq_set_affinity_notifier(
                m->irq,
                NULL);
        m->notifier = NULL;

        qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
                    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
        irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
        dd->cspec->msix_entries[msixnum].notifier = NULL;
}

static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
{
        struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
        struct qib_irq_notify *n;

        if (!m->dca)
@ -3354,7 +3341,7 @@ static void setup_dca_notifier(struct qib_devdata *dd, struct qib_msix_entry *m)
                int ret;

                m->notifier = n;
                n->notify.irq = m->irq;
                n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
                n->notify.notify = qib_irq_notifier_notify;
                n->notify.release = qib_irq_notifier_release;
                n->arg = m->arg;
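For context, the affinity-notifier plumbing these hunks touch is the generic one from linux/interrupt.h. A minimal, hypothetical wiring (my_notify and my_release are placeholders; qib's qib_irq_notify embeds the same struct):

        #include <linux/interrupt.h>
        #include <linux/pci.h>

        static void my_notify(struct irq_affinity_notify *notify,
                              const cpumask_t *mask)
        {
                /* react to the IRQ's affinity mask changing */
        }

        static void my_release(struct kref *ref)
        {
                /* drop the reference taken by the notifier core */
        }

        static int attach_notifier(struct pci_dev *pdev, int msixnum,
                                   struct irq_affinity_notify *n)
        {
                n->irq = pci_irq_vector(pdev, msixnum);
                n->notify = my_notify;
                n->release = my_release;
                return irq_set_affinity_notifier(n->irq, n);
        }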
@ -3415,22 +3402,17 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
        if (!dd->cspec->num_msix_entries) {
                /* Try to get INTx interrupt */
try_intx:
                if (!dd->pcidev->irq) {
                        qib_dev_err(dd,
                                "irq is 0, BIOS error? Interrupts won't work\n");
                        goto bail;
                }
                ret = request_irq(dd->pcidev->irq, qib_7322intr,
                                  IRQF_SHARED, QIB_DRV_NAME, dd);
                ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
                                      QIB_DRV_NAME);
                if (ret) {
                        qib_dev_err(dd,
                        qib_dev_err(
                                dd,
                                "Couldn't setup INTx interrupt (irq=%d): %d\n",
                                dd->pcidev->irq, ret);
                        goto bail;
                                pci_irq_vector(dd->pcidev, 0), ret);
                        return;
                }
                dd->cspec->irq = dd->pcidev->irq;
                dd->cspec->main_int_mask = ~0ULL;
                goto bail;
                return;
        }

        /* Try to get MSIx interrupts */
@ -3458,10 +3440,6 @@ try_intx:
#ifdef CONFIG_INFINIBAND_QIB_DCA
                int dca = 0;
#endif

                dd->cspec->msix_entries[msixnum].
                        name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
                        = '\0';
                if (i < ARRAY_SIZE(irq_table)) {
                        if (irq_table[i].port) {
                                /* skip if for a non-configured port */
@ -3475,11 +3453,10 @@ try_intx:
#endif
                                lsb = irq_table[i].lsb;
                                handler = irq_table[i].handler;
                                snprintf(dd->cspec->msix_entries[msixnum].name,
                                         sizeof(dd->cspec->msix_entries[msixnum].name)
                                         - 1,
                                         QIB_DRV_NAME "%d%s", dd->unit,
                                         irq_table[i].name);
                                ret = pci_request_irq(dd->pcidev, msixnum, handler,
                                                      NULL, arg, QIB_DRV_NAME "%d%s",
                                                      dd->unit,
                                                      irq_table[i].name);
                } else {
                        unsigned ctxt;

@ -3495,37 +3472,25 @@ try_intx:
#endif
                        lsb = QIB_I_RCVAVAIL_LSB + ctxt;
                        handler = qib_7322pintr;
                        snprintf(dd->cspec->msix_entries[msixnum].name,
                                 sizeof(dd->cspec->msix_entries[msixnum].name)
                                 - 1,
                                 QIB_DRV_NAME "%d (kctx)", dd->unit);
                        ret = pci_request_irq(dd->pcidev, msixnum, handler,
                                              NULL, arg,
                                              QIB_DRV_NAME "%d (kctx)",
                                              dd->unit);
                }

                dd->cspec->msix_entries[msixnum].irq = pci_irq_vector(
                        dd->pcidev, msixnum);
                if (dd->cspec->msix_entries[msixnum].irq < 0) {
                        qib_dev_err(dd,
                                "Couldn't get MSIx irq (vec=%d): %d\n",
                                msixnum,
                                dd->cspec->msix_entries[msixnum].irq);
                        qib_7322_nomsix(dd);
                        goto try_intx;
                }
                ret = request_irq(dd->cspec->msix_entries[msixnum].irq,
                                  handler, 0,
                                  dd->cspec->msix_entries[msixnum].name,
                                  arg);
                if (ret) {
                        /*
                         * Shouldn't happen since the enable said we could
                         * have as many as we are trying to setup here.
                         */
                        qib_dev_err(dd,
                                "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
                                msixnum,
                                dd->cspec->msix_entries[msixnum].irq,
                                ret);
                        qib_7322_nomsix(dd);
                                "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
                                msixnum,
                                pci_irq_vector(dd->pcidev, msixnum),
                                ret);
                        qib_7322_free_irq(dd);
                        pci_alloc_irq_vectors(dd->pcidev, 1, 1,
                                              PCI_IRQ_LEGACY);
                        goto try_intx;
                }
                dd->cspec->msix_entries[msixnum].arg = arg;
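The loop above now leans entirely on the PCI core: one pci_request_irq() per vector, with the name built from a format string instead of a fixed-size buffer. A compact sketch of the shape of that loop (my prefix "mydrv", args, unit and nvec are hypothetical):

        #include <linux/interrupt.h>
        #include <linux/pci.h>

        static int request_all_vectors(struct pci_dev *pdev,
                                       irq_handler_t handler, void **args,
                                       int unit, int nvec)
        {
                int i, ret = 0;

                for (i = 0; i < nvec; i++) {
                        /* the PCI core allocates and stores the formatted name */
                        ret = pci_request_irq(pdev, i, handler, NULL, args[i],
                                              "mydrv%d vec%d", unit, i);
                        if (ret)
                                break;  /* caller may fall back to fewer vectors */
                }
                return ret;
        }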
@ -3559,7 +3524,7 @@ try_intx:
                                dd->cspec->msix_entries[msixnum].mask);
                        }
                        irq_set_affinity_hint(
                                dd->cspec->msix_entries[msixnum].irq,
                                pci_irq_vector(dd->pcidev, msixnum),
                                dd->cspec->msix_entries[msixnum].mask);
                }
                msixnum++;
@ -3570,7 +3535,6 @@ try_intx:
        dd->cspec->main_int_mask = mask;
        tasklet_init(&dd->error_tasklet, qib_error_tasklet,
                     (unsigned long)dd);
bail:;
}

/**
@ -3674,8 +3638,9 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
        /* no interrupts till re-initted */
        qib_7322_set_intr_state(dd, 0);

        qib_7322_free_irq(dd);

        if (msix_entries) {
                qib_7322_nomsix(dd);
                /* can be up to 512 bytes, too big for stack */
                msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
                                       sizeof(u64), GFP_KERNEL);
@ -3765,11 +3730,11 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
                write_7322_init_portregs(&dd->pport[i]);
        write_7322_initregs(dd);

        if (qib_pcie_params(dd, dd->lbus_width,
                            &dd->cspec->num_msix_entries))
        if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
                qib_dev_err(dd,
                        "Reset failed to setup PCIe or interrupts; continuing anyway\n");

        dd->cspec->num_msix_entries = msix_entries;
        qib_setup_7322_interrupt(dd, 1);

        for (i = 0; i < dd->num_pports; ++i) {
@ -5197,8 +5162,9 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd)

        qib_devinfo(dd->pcidev,
                    "MSIx interrupt not detected, trying INTx interrupts\n");
        qib_7322_nomsix(dd);
        qib_enable_intx(dd);
        qib_7322_free_irq(dd);
        if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
                qib_dev_err(dd, "Failed to enable INTx\n");
        qib_setup_7322_interrupt(dd, 0);
        return 1;
}
@ -193,7 +193,7 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
 * chip reset (the kernel PCI infrastructure doesn't yet handle that
 * correctly).
 */
static void qib_msi_setup(struct qib_devdata *dd, int pos)
static void qib_cache_msi_info(struct qib_devdata *dd, int pos)
{
        struct pci_dev *pdev = dd->pcidev;
        u16 control;
@ -208,64 +208,39 @@ static void qib_msi_setup(struct qib_devdata *dd, int pos)
                              &dd->msi_data);
}

static int qib_allocate_irqs(struct qib_devdata *dd, u32 maxvec)
{
        unsigned int flags = PCI_IRQ_LEGACY;

        /* Check our capabilities */
        if (dd->pcidev->msix_cap) {
                flags |= PCI_IRQ_MSIX;
        } else {
                if (dd->pcidev->msi_cap) {
                        flags |= PCI_IRQ_MSI;
                        /* Get msi_lo and msi_hi */
                        qib_msi_setup(dd, dd->pcidev->msi_cap);
                }
        }

        if (!(flags & (PCI_IRQ_MSIX | PCI_IRQ_MSI)))
                qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");

        return pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
}

int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
{
        u16 linkstat, speed;
        int nvec;
        int maxvec;
        int ret = 0;
        unsigned int flags = PCI_IRQ_MSIX | PCI_IRQ_MSI;

        if (!pci_is_pcie(dd->pcidev)) {
                qib_dev_err(dd, "Can't find PCI Express capability!\n");
                /* set up something... */
                dd->lbus_width = 1;
                dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
                ret = -1;
                nvec = -1;
                goto bail;
        }

        if (dd->flags & QIB_HAS_INTX)
                flags |= PCI_IRQ_LEGACY;
        maxvec = (nent && *nent) ? *nent : 1;
        nvec = qib_allocate_irqs(dd, maxvec);
        if (nvec < 0) {
                ret = nvec;
        nvec = pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
        if (nvec < 0)
                goto bail;
        }

        /*
         * If nent exists, make sure to record how many vectors were allocated
         * If nent exists, make sure to record how many vectors were allocated.
         * If msix_enabled is false, return 0 so the fallback code works
         * correctly.
         */
        if (nent) {
                *nent = nvec;
        if (nent)
                *nent = !dd->pcidev->msix_enabled ? 0 : nvec;

                /*
                 * If we requested (nent) MSIX, but msix_enabled is not set,
                 * pci_alloc_irq_vectors() enabled INTx.
                 */
                if (!dd->pcidev->msix_enabled)
                        qib_dev_err(dd,
                                    "no msix vectors allocated, using INTx\n");
        }
        if (dd->pcidev->msi_enabled)
                qib_cache_msi_info(dd, dd->pcidev->msi_cap);

        pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
        /*
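The flag set handed to pci_alloc_irq_vectors() encodes the whole interrupt-scheme preference order: the core tries MSI-X, then MSI, then (if permitted) INTx, and only if every allowed scheme fails does it return an error. A hedged sketch (have_intx and maxvec are stand-ins for the driver's state):

        #include <linux/pci.h>

        static int alloc_vectors(struct pci_dev *pdev, int maxvec, bool have_intx)
        {
                unsigned int flags = PCI_IRQ_MSIX | PCI_IRQ_MSI;

                if (have_intx)          /* only chips that support INTx */
                        flags |= PCI_IRQ_LEGACY;
                /* returns the number of vectors allocated, or -errno */
                return pci_alloc_irq_vectors(pdev, 1, maxvec, flags);
        }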
@ -306,7 +281,21 @@ bail:
        /* fill in string, even on errors */
        snprintf(dd->lbus_info, sizeof(dd->lbus_info),
                 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
        return ret;
        return nvec < 0 ? nvec : 0;
}

/**
 * qib_free_irq - Cleanup INTx and MSI interrupts
 * @dd: valid pointer to qib dev data
 *
 * Since cleanup for INTx and MSI interrupts is trivial, have a common
 * routine.
 *
 */
void qib_free_irq(struct qib_devdata *dd)
{
        pci_free_irq(dd->pcidev, 0, dd);
        pci_free_irq_vectors(dd->pcidev);
}

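qib_free_irq() gives every chip generation one symmetric exit path for the single-vector (INTx/MSI) case. A sketch of the probe/remove pairing it assumes (my_handler, dev_id and the "mydrv" name are hypothetical):

        #include <linux/interrupt.h>
        #include <linux/pci.h>

        static int single_vector_probe(struct pci_dev *pdev,
                                       irq_handler_t handler, void *dev_id)
        {
                int nvec = pci_alloc_irq_vectors(pdev, 1, 1,
                                                 PCI_IRQ_MSI | PCI_IRQ_LEGACY);

                if (nvec < 0)
                        return nvec;
                return pci_request_irq(pdev, 0, handler, NULL, dev_id, "mydrv");
        }

        static void single_vector_remove(struct pci_dev *pdev, void *dev_id)
        {
                /* exactly the two calls qib_free_irq() makes */
                pci_free_irq(pdev, 0, dev_id);
                pci_free_irq_vectors(pdev);
        }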
/*
@ -351,10 +340,10 @@ int qib_reinit_intr(struct qib_devdata *dd)
                       dd->msi_data);
        ret = 1;
bail:
        if (!ret && (dd->flags & QIB_HAS_INTX)) {
                qib_enable_intx(dd);
        qib_free_irq(dd);

        if (!ret && (dd->flags & QIB_HAS_INTX))
                ret = 1;
        }

        /* and now set the pci master bit again */
        pci_set_master(dd->pcidev);
@ -362,56 +351,6 @@ bail:
        return ret;
}

/*
 * Disable msi interrupt if enabled, and clear msi_lo.
 * This is used primarily for the fallback to INTx, but
 * is also used in reinit after reset, and during cleanup.
 */
void qib_nomsi(struct qib_devdata *dd)
{
        dd->msi_lo = 0;
        pci_free_irq_vectors(dd->pcidev);
}

/*
 * Same as qib_nomsi, but for MSIx.
 */
void qib_nomsix(struct qib_devdata *dd)
{
        pci_free_irq_vectors(dd->pcidev);
}

/*
 * Similar to pci_intx(pdev, 1), except that we make sure
 * msi(x) is off.
 */
void qib_enable_intx(struct qib_devdata *dd)
{
        u16 cw, new;
        int pos;
        struct pci_dev *pdev = dd->pcidev;

        if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY) < 0)
                qib_dev_err(dd, "Failed to enable INTx\n");

        pos = pdev->msi_cap;
        if (pos) {
                /* then turn off MSI */
                pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
                new = cw & ~PCI_MSI_FLAGS_ENABLE;
                if (new != cw)
                        pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
        }
        pos = pdev->msix_cap;
        if (pos) {
                /* then turn off MSIx */
                pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
                new = cw & ~PCI_MSIX_FLAGS_ENABLE;
                if (new != cw)
                        pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
        }
}

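The helpers deleted above poked PCI_MSI_FLAGS_ENABLE and PCI_MSIX_FLAGS_ENABLE in config space by hand. Under the vector API a short sequence reaches the same end state, since freeing the vectors tears down MSI/MSI-X before the legacy allocation (hypothetical usage):

        #include <linux/pci.h>

        static void force_intx(struct pci_dev *pdev)
        {
                /* one step replaces qib_nomsi()/qib_nomsix()/qib_enable_intx() */
                pci_free_irq_vectors(pdev);
                if (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY) < 0)
                        dev_err(&pdev->dev, "Failed to enable INTx\n");
        }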
/*
 * These two routines are helper routines for the device reset code
 * to move all the pcie code out of the chip-specific driver code.