mt76: use readl/writel instead of ioread32/iowrite32
Switching to readl/writel is faster because it gets rid of an unnecessary wrapper with extra checks.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
commit d908d4ec4d
parent cee646d62b
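The "unnecessary wrapper with extra checks" refers to the generic ioread32()/iowrite32() implementation: on kernels built with CONFIG_GENERIC_IOMAP these are out-of-line library calls that inspect the cookie at run time to decide between a port-I/O and an MMIO access, whereas readl()/writel() compile down to a direct MMIO load/store. The mt76 registers are always memory mapped, so that dispatch is pure overhead. Below is a minimal sketch of the dispatch, loosely modelled on lib/iomap.c; the function name and threshold constant are illustrative, not the exact kernel code.

/*
 * Illustrative sketch of the run-time dispatch inside the generic
 * ioread32() (loosely based on lib/iomap.c with CONFIG_GENERIC_IOMAP).
 * Low "addresses" are treated as port-I/O cookies; everything else is
 * MMIO.  mt76 registers are always MMIO, so readl() skips this check.
 */
#include <linux/io.h>

#define SKETCH_PIO_RESERVED	0x40000UL	/* illustrative threshold */

static unsigned int sketch_ioread32(void __iomem *addr)
{
	unsigned long port = (unsigned long __force)addr;

	if (port < SKETCH_PIO_RESERVED)		/* extra run-time check ...    */
		return inl(port & 0xffff);	/* ... and a port-I/O path     */

	return readl(addr);			/* direct MMIO load, all mt76 needs */
}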
@@ -49,10 +49,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 	for (i = 0; i < q->ndesc; i++)
 		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 
-	iowrite32(q->desc_dma, &q->regs->desc_base);
-	iowrite32(0, &q->regs->cpu_idx);
-	iowrite32(0, &q->regs->dma_idx);
-	iowrite32(q->ndesc, &q->regs->ring_size);
+	writel(q->desc_dma, &q->regs->desc_base);
+	writel(0, &q->regs->cpu_idx);
+	writel(0, &q->regs->dma_idx);
+	writel(q->ndesc, &q->regs->ring_size);
 
 	return 0;
 }
@@ -136,11 +136,11 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
-	iowrite32(q->desc_dma, &q->regs->desc_base);
-	iowrite32(q->ndesc, &q->regs->ring_size);
-	q->head = ioread32(&q->regs->dma_idx);
+	writel(q->desc_dma, &q->regs->desc_base);
+	writel(q->ndesc, &q->regs->ring_size);
+	q->head = readl(&q->regs->dma_idx);
 	q->tail = q->head;
-	iowrite32(q->head, &q->regs->cpu_idx);
+	writel(q->head, &q->regs->cpu_idx);
 }
 
 static void
@@ -159,7 +159,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 	if (flush)
 		last = -1;
 	else
-		last = ioread32(&q->regs->dma_idx);
+		last = readl(&q->regs->dma_idx);
 
 	while (q->queued && q->tail != last) {
 		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
@@ -181,7 +181,7 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
 		}
 
 		if (!flush && q->tail == last)
-			last = ioread32(&q->regs->dma_idx);
+			last = readl(&q->regs->dma_idx);
 	}
 
 	if (!flush)
@@ -251,7 +251,7 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 static void
 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
-	iowrite32(q->head, &q->regs->cpu_idx);
+	writel(q->head, &q->regs->cpu_idx);
 }
 
 static int

@@ -21,7 +21,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
 {
 	u32 val;
 
-	val = ioread32(dev->mmio.regs + offset);
+	val = readl(dev->mmio.regs + offset);
 	trace_reg_rr(dev, offset, val);
 
 	return val;
@@ -30,7 +30,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
 static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
 {
 	trace_reg_wr(dev, offset, val);
-	iowrite32(val, dev->mmio.regs + offset);
+	writel(val, dev->mmio.regs + offset);
 }
 
 static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)

@@ -1392,11 +1392,11 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
 			continue;
 
 		prev_dma_idx = dev->tx_dma_idx[i];
-		dma_idx = ioread32(&q->regs->dma_idx);
+		dma_idx = readl(&q->regs->dma_idx);
 		dev->tx_dma_idx[i] = dma_idx;
 
 		if (dma_idx == prev_dma_idx &&
-		    dma_idx != ioread32(&q->regs->cpu_idx))
+		    dma_idx != readl(&q->regs->cpu_idx))
 			break;
 	}
 

@@ -349,7 +349,7 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
 			continue;
 
 		prev_dma_idx = dev->mt76.tx_dma_idx[i];
-		dma_idx = ioread32(&q->regs->dma_idx);
+		dma_idx = readl(&q->regs->dma_idx);
 		dev->mt76.tx_dma_idx[i] = dma_idx;
 
 		if (prev_dma_idx == dma_idx)
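All of the &q->regs->... arguments above work because q->regs is an __iomem pointer to the ring's hardware register block, overlaid as a plain struct so that each field's address is the MMIO address handed to readl()/writel(). A rough sketch of such a layout follows; the field names match the accesses above, but the struct name here is illustrative and the exact definition lives in mt76.h.

/*
 * Rough sketch of the per-ring DMA register block that q->regs points to.
 * With an __iomem mapping of this layout, &q->regs->dma_idx is simply the
 * register's MMIO address, ready to be passed to readl()/writel().
 */
#include <linux/types.h>

struct sketch_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);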