mirror of https://github.com/torvalds/linux.git
IB/ipath: kreceive uses portdata rather than devdata
kreceive now takes a portdata * instead of a devdata *, plus other kreceive-related cleanups.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
commit c59a80aca0
parent d65708f3a7
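The core of the change: per-port receive state (the header-queue head and access to the DMA'd tail pointer) moves from struct ipath_devdata into struct ipath_portdata, and ipath_kreceive() now takes the port and derives the device via pd->port_dd. Below is a minimal, self-contained sketch of that pattern using hypothetical stand-in types (port, device, kreceive); it is an illustration distilled from the diff, not kernel code.

#include <stdint.h>
#include <stdio.h>

struct device;                          /* forward declaration */

struct port {
        struct device *dd;              /* back-pointer, like pd->port_dd */
        uint32_t head;                  /* per-port head, like pd->port_head */
        volatile uint64_t *tail_addr;   /* in-memory tail, like port_rcvhdrtail_kvaddr */
};

struct device {
        struct port *ports[1];          /* like dd->ipath_pd[] */
};

/* like ipath_get_rcvhdrtail(): read the DMA'd tail for this port */
static uint32_t get_rcvhdrtail(const struct port *pd)
{
        return (uint32_t)*pd->tail_addr;
}

/* like the new ipath_kreceive(): takes the port, recovers the device */
static void kreceive(struct port *pd)
{
        struct device *dd = pd->dd;     /* devdata derived from portdata */
        uint32_t l = pd->head;
        uint32_t tail = get_rcvhdrtail(pd);

        while (l != tail) {
                /* ... process one receive header queue entry ... */
                l++;
        }
        pd->head = l;                   /* head state stays with the port */
        (void)dd;                       /* dd remains available for register access */
}

int main(void)
{
        volatile uint64_t tail = 3;
        struct device dev;
        struct port p0 = { .dd = &dev, .head = 0, .tail_addr = &tail };

        dev.ports[0] = &p0;
        kreceive(dev.ports[0]);         /* callers now pass the port, e.g. dd->ipath_pd[0] */
        printf("head advanced to %u\n", p0.head);
        return 0;
}

The diff below makes the same move in the real driver: pd->port_head replaces dd->ipath_port0head, and the new ipath_get_rcvhdrtail()/ipath_clear_rcvhdrtail() helpers wrap the in-memory tail pointer.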
@@ -1104,13 +1104,14 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
 
 /*
  * ipath_kreceive - receive a packet
- * @dd: the infinipath device
+ * @pd: the infinipath port
  *
  * called from interrupt handler for errors or receive interrupt
  */
-void ipath_kreceive(struct ipath_devdata *dd)
+void ipath_kreceive(struct ipath_portdata *pd)
 {
         u64 *rc;
+        struct ipath_devdata *dd = pd->port_dd;
         void *ebuf;
         const u32 rsize = dd->ipath_rcvhdrentsize;      /* words */
         const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
@@ -1125,8 +1126,8 @@ void ipath_kreceive(struct ipath_devdata *dd)
                 goto bail;
         }
 
-        l = dd->ipath_port0head;
-        hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr);
+        l = pd->port_head;
+        hdrqtail = ipath_get_rcvhdrtail(pd);
         if (l == hdrqtail)
                 goto bail;
 
@@ -1135,7 +1136,7 @@ reloop:
                 u32 qp;
                 u8 *bthbytes;
 
-                rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2));
+                rc = (u64 *) (pd->port_rcvhdrq + (l << 2));
                 hdr = (struct ipath_message_header *)&rc[1];
                 /*
                  * could make a network order version of IPATH_KD_QP, and
@@ -1245,7 +1246,7 @@ reloop:
                  * earlier packets, we "almost" guarantee we have covered
                  * that case.
                  */
-                u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr);
+                u32 hqtail = ipath_get_rcvhdrtail(pd);
                 if (hqtail != hdrqtail) {
                         hdrqtail = hqtail;
                         reloop = 1; /* loop 1 extra time at most */
@@ -1255,7 +1256,7 @@ reloop:
 
         pkttot += i;
 
-        dd->ipath_port0head = l;
+        pd->port_head = l;
 
         if (pkttot > ipath_stats.sps_maxpkts_call)
                 ipath_stats.sps_maxpkts_call = pkttot;
@@ -1605,7 +1606,8 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
 
         /* clear for security and sanity on each use */
         memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
-        memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
+        if (pd->port_rcvhdrtail_kvaddr)
+                memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
 
         /*
          * tell chip each time we init it, even if we are re-using previous
@@ -742,7 +742,8 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
                  * updated and correct itself, even in the face of software
                  * bugs.
                  */
-                *(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0;
+                if (pd->port_rcvhdrtail_kvaddr)
+                        ipath_clear_rcvhdrtail(pd);
                 set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
                         &dd->ipath_rcvctrl);
         } else
@@ -1391,7 +1392,10 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
         pollflag = ipath_poll_hdrqfull(pd);
 
         head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
-        tail = *(volatile u64 *)pd->port_rcvhdrtail_kvaddr;
+        if (pd->port_rcvhdrtail_kvaddr)
+                tail = ipath_get_rcvhdrtail(pd);
+        else
+                tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
 
         if (head != tail)
                 pollflag |= POLLIN | POLLRDNORM;
@@ -1932,7 +1936,8 @@ static int ipath_do_user_init(struct file *fp,
          * We explictly set the in-memory copy to 0 beforehand, so we don't
          * have to wait to be sure the DMA update has happened.
          */
-        *(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0ULL;
+        if (pd->port_rcvhdrtail_kvaddr)
+                ipath_clear_rcvhdrtail(pd);
         set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
                 &dd->ipath_rcvctrl);
         ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
@@ -526,12 +526,11 @@ static void enable_chip(struct ipath_devdata *dd,
          */
         val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
         (void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
-        dd->ipath_port0head = ipath_read_ureg32(dd, ur_rcvhdrtail, 0);
 
         /* Initialize so we interrupt on next packet received */
         (void)ipath_write_ureg(dd, ur_rcvhdrhead,
                                dd->ipath_rhdrhead_intr_off |
-                               dd->ipath_port0head, 0);
+                               dd->ipath_pd[0]->port_head, 0);
 
         /*
          * by now pioavail updates to memory should have occurred, so
@@ -693,7 +692,7 @@ done:
  */
 int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 {
-        int ret = 0, i;
+        int ret = 0;
         u32 val32, kpiobufs;
         u32 piobufs, uports;
         u64 val;
@@ -750,7 +749,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
                 kpiobufs = ipath_kpiobufs;
 
         if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
-                i = (int) piobufs -
+                int i = (int) piobufs -
                         (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
                 if (i < 0)
                         i = 0;
@@ -683,7 +683,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                 for (i = 0; i < dd->ipath_cfgports; i++) {
                         struct ipath_portdata *pd = dd->ipath_pd[i];
                         if (i == 0) {
-                                hd = dd->ipath_port0head;
+                                hd = pd->port_head;
                                 tl = (u32) le64_to_cpu(
                                         *dd->ipath_hdrqtailptr);
                         } else if (pd && pd->port_cnt &&
@@ -712,6 +712,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                         }
                 }
         if (errs & INFINIPATH_E_RRCVEGRFULL) {
+                struct ipath_portdata *pd = dd->ipath_pd[0];
+
                 /*
                  * since this is of less importance and not likely to
                  * happen without also getting hdrfull, only count
@@ -719,7 +721,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
                  * vs user)
                  */
                 ipath_stats.sps_etidfull++;
-                if (dd->ipath_port0head !=
+                if (pd->port_head !=
                     (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
                         chkerrpkts = 1;
         }
@@ -1173,7 +1175,7 @@ irqreturn_t ipath_intr(int irq, void *data)
          * for receive are at the bottom.
          */
         if (chk0rcv) {
-                ipath_kreceive(dd);
+                ipath_kreceive(dd->ipath_pd[0]);
                 istat &= ~port0rbits;
         }
 
@@ -167,6 +167,8 @@ struct ipath_portdata {
         u32 active_slaves;
         /* Type of packets or conditions we want to poll for */
         u16 poll_type;
+        /* port rcvhdrq head offset */
+        u32 port_head;
 };
 
 struct sk_buff;
@@ -314,8 +316,6 @@ struct ipath_devdata {
          * supports, less gives more pio bufs/port, etc.
          */
         u32 ipath_cfgports;
-        /* port0 rcvhdrq head offset */
-        u32 ipath_port0head;
         /* count of port 0 hdrqfull errors */
         u32 ipath_p0_hdrqfull;
 
@@ -690,7 +690,7 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
 
 int ipath_parse_ushort(const char *str, unsigned short *valp);
 
-void ipath_kreceive(struct ipath_devdata *);
+void ipath_kreceive(struct ipath_portdata *);
 int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
 int ipath_reset_device(int);
 void ipath_get_faststats(unsigned long);
@@ -928,6 +928,17 @@ static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
                 (char __iomem *)dd->ipath_kregbase));
 }
 
+static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
+{
+        *((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
+}
+
+static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
+{
+        return (u32) le64_to_cpu(*((volatile __le64 *)
+                                pd->port_rcvhdrtail_kvaddr));
+}
+
 /*
  * sysfs interface.
  */
@@ -133,15 +133,16 @@ bail:
 static void ipath_qcheck(struct ipath_devdata *dd)
 {
         static u64 last_tot_hdrqfull;
+        struct ipath_portdata *pd = dd->ipath_pd[0];
         size_t blen = 0;
         char buf[128];
 
         *buf = 0;
-        if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
+        if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
                 blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
-                                dd->ipath_pd[0]->port_hdrqfull -
+                                pd->port_hdrqfull -
                                 dd->ipath_p0_hdrqfull);
-                dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
+                dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
         }
         if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
                 blen += snprintf(buf + blen, sizeof buf - blen,
@@ -173,7 +174,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
         if (blen)
                 ipath_dbg("%s\n", buf);
 
-        if (dd->ipath_port0head != (u32)
+        if (pd->port_head != (u32)
                 le64_to_cpu(*dd->ipath_hdrqtailptr)) {
                 if (dd->ipath_lastport0rcv_cnt ==
                     ipath_stats.sps_port0pkts) {
@@ -181,7 +182,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
                          "port0 hd=%llx tl=%x; port0pkts %llx\n",
                          (unsigned long long)
                          le64_to_cpu(*dd->ipath_hdrqtailptr),
-                         dd->ipath_port0head,
+                         pd->port_head,
                          (unsigned long long)
                          ipath_stats.sps_port0pkts);
                 }