dmaengine: idxd: remove interrupt disable for dev_lock
The spinlock is not used in hard interrupt context, so there is no need to disable irqs when acquiring it. The interrupt thread handler also does not run in bottom-half context, so disabling bottom halves can be dropped as well. Convert all dev_lock acquisitions to plain spin_lock()/spin_unlock() calls.

Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/162984026772.1939166.11504067782824765879.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
commit cf84a4b968
parent f9f4082dbc
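For reference, the pattern applied throughout the diff below is the replacement of the irq-saving and bottom-half-disabling spinlock variants with the plain ones, which is safe once the lock is never taken from hard-irq or softirq context. Below is a minimal sketch of that conversion; struct example_device and example_update_state() are hypothetical stand-ins, not types or functions from the idxd driver.

/*
 * Minimal sketch of the locking conversion. example_device and
 * example_update_state() are hypothetical, not part of the idxd driver.
 * The caller is assumed to have initialized the lock with spin_lock_init().
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_device {
        spinlock_t dev_lock;    /* protects 'state'; never taken in hard-irq or softirq context */
        u32 state;
};

static void example_update_state(struct example_device *ed, u32 new_state)
{
        /*
         * Before: if the lock could also be taken from hard-irq context,
         * interrupts had to be disabled while holding it:
         *
         *      unsigned long flags;
         *
         *      spin_lock_irqsave(&ed->dev_lock, flags);
         *      ed->state = new_state;
         *      spin_unlock_irqrestore(&ed->dev_lock, flags);
         *
         * After: all users run in process context (including a threaded
         * irq handler), so a plain spin_lock() is sufficient.
         */
        spin_lock(&ed->dev_lock);
        ed->state = new_state;
        spin_unlock(&ed->dev_lock);
}

The same reasoning covers the spin_lock_bh() calls in the interrupt handling path: bottom halves only need to be disabled if the lock is also acquired from softirq context, which a threaded interrupt handler is not.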
@@ -218,14 +218,13 @@ static __poll_t idxd_cdev_poll(struct file *filp,
 	struct idxd_user_context *ctx = filp->private_data;
 	struct idxd_wq *wq = ctx->wq;
 	struct idxd_device *idxd = wq->idxd;
-	unsigned long flags;
 	__poll_t out = 0;
 
 	poll_wait(filp, &wq->err_queue, wait);
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	if (idxd->sw_err.valid)
 		out = EPOLLIN | EPOLLRDNORM;
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 
 	return out;
 }
@@ -341,19 +341,18 @@ int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
 	int rc;
 	union wqcfg wqcfg;
 	unsigned int offset;
-	unsigned long flags;
 
 	rc = idxd_wq_disable(wq, false);
 	if (rc < 0)
 		return rc;
 
 	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
 	wqcfg.pasid_en = 1;
 	wqcfg.pasid = pasid;
 	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 
 	rc = idxd_wq_enable(wq);
 	if (rc < 0)
@@ -368,19 +367,18 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
 	int rc;
 	union wqcfg wqcfg;
 	unsigned int offset;
-	unsigned long flags;
 
 	rc = idxd_wq_disable(wq, false);
 	if (rc < 0)
 		return rc;
 
 	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
 	wqcfg.pasid_en = 0;
 	wqcfg.pasid = 0;
 	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 
 	rc = idxd_wq_enable(wq);
 	if (rc < 0)
@@ -558,7 +556,6 @@ int idxd_device_disable(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
 	u32 status;
-	unsigned long flags;
 
 	if (!idxd_is_enabled(idxd)) {
 		dev_dbg(dev, "Device is not enabled\n");
@@ -574,22 +571,20 @@ int idxd_device_disable(struct idxd_device *idxd)
 		return -ENXIO;
 	}
 
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	idxd_device_clear_state(idxd);
 	idxd->state = IDXD_DEV_DISABLED;
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 	return 0;
 }
 
 void idxd_device_reset(struct idxd_device *idxd)
 {
-	unsigned long flags;
-
 	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	idxd_device_clear_state(idxd);
 	idxd->state = IDXD_DEV_DISABLED;
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 }
 
 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
@@ -1164,7 +1159,6 @@ int __drv_enable_wq(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
-	unsigned long flags;
 	int rc = -ENXIO;
 
 	lockdep_assert_held(&wq->wq_lock);
@@ -1216,10 +1210,10 @@ int __drv_enable_wq(struct idxd_wq *wq)
 	}
 
 	rc = 0;
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
 		rc = idxd_device_config(idxd);
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 	if (rc < 0) {
 		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
 		goto err;
@@ -1288,7 +1282,6 @@ void drv_disable_wq(struct idxd_wq *wq)
 int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
 {
 	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
-	unsigned long flags;
 	int rc = 0;
 
 	/*
@@ -1302,10 +1295,10 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
 	}
 
 	/* Device configuration */
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
 		rc = idxd_device_config(idxd);
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 	if (rc < 0)
 		return -ENXIO;
 
@@ -64,7 +64,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 	bool err = false;
 
 	if (cause & IDXD_INTC_ERR) {
-		spin_lock_bh(&idxd->dev_lock);
+		spin_lock(&idxd->dev_lock);
 		for (i = 0; i < 4; i++)
 			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
 					IDXD_SWERR_OFFSET + i * sizeof(u64));
@@ -89,7 +89,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 			}
 		}
 
-		spin_unlock_bh(&idxd->dev_lock);
+		spin_unlock(&idxd->dev_lock);
 		val |= IDXD_INTC_ERR;
 
 		for (i = 0; i < 4; i++)
@@ -133,7 +133,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 			INIT_WORK(&idxd->work, idxd_device_reinit);
 			queue_work(idxd->wq, &idxd->work);
 		} else {
-			spin_lock_bh(&idxd->dev_lock);
+			spin_lock(&idxd->dev_lock);
 			idxd_wqs_quiesce(idxd);
 			idxd_wqs_unmap_portal(idxd);
 			idxd_device_clear_state(idxd);
@@ -141,7 +141,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 				"idxd halted, need %s.\n",
 				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
 				"FLR" : "system reset");
-			spin_unlock_bh(&idxd->dev_lock);
+			spin_unlock(&idxd->dev_lock);
 			return -ENXIO;
 		}
 	}
@@ -1099,16 +1099,15 @@ static ssize_t clients_show(struct device *dev,
 			    struct device_attribute *attr, char *buf)
 {
 	struct idxd_device *idxd = confdev_to_idxd(dev);
-	unsigned long flags;
 	int count = 0, i;
 
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	for (i = 0; i < idxd->max_wqs; i++) {
 		struct idxd_wq *wq = idxd->wqs[i];
 
 		count += wq->client_count;
 	}
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 
 	return sysfs_emit(buf, "%d\n", count);
 }
@@ -1146,12 +1145,11 @@ static ssize_t errors_show(struct device *dev,
 {
 	struct idxd_device *idxd = confdev_to_idxd(dev);
 	int i, out = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&idxd->dev_lock, flags);
+	spin_lock(&idxd->dev_lock);
 	for (i = 0; i < 4; i++)
 		out += sysfs_emit_at(buf, out, "%#018llx ", idxd->sw_err.bits[i]);
-	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	spin_unlock(&idxd->dev_lock);
 	out--;
 	out += sysfs_emit_at(buf, out, "\n");
 	return out;