linux/drivers/misc/habanalabs/command_buffer.c
Oded Gabbay 3f5398cfbf habanalabs: improve IOCTLs behavior when disabled or reset
This patch improves how IOCTLs behave when the device is disabled or
under reset.

The new code checks, at the start of every IOCTL, if the device is
disabled or in reset. If so, it prints an appropriate kernel message and
returns -EBUSY to user-space.

In addition, the patch changes where the hard_reset_pending flag is set
and cleared:

1. It is now cleared immediately after the reset *tear-down* flow is
   finished but before the re-initialization flow begins.

2. It is now also set in the device's remove function, so that removal
   behaves the same as the hard-reset flow.

There are two exceptions to the disabled-or-in-reset check:

1. The HL_INFO_DEVICE_STATUS opcode in the INFO IOCTL. This opcode allows
   the user to inquire about the status of the device, i.e. whether it is
   operational, in reset or malfunctioning (disabled). If the driver blocked
   this IOCTL, the user would not be able to retrieve the status when the
   device is malfunctioning or in reset.

2. The WAIT_FOR_CS IOCTL. This IOCTL allows the user to inquire about the
   status of a CS. We want to allow the user to continue to do so even after
   a soft-reset process has started, because that lets the user get the
   correct error code for each CS that was submitted.

Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
2019-04-06 15:41:35 +03:00
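
For reference, the common entry check described above is performed by
hl_device_disabled_or_in_reset(), which hl_cb_ioctl() below calls before
dispatching. The helper's body lives outside this file; the snippet here is
only a minimal sketch of the logic it is assumed to implement, based on the
hdev->disabled and hdev->in_reset fields that this file uses.

/*
 * Minimal sketch (an assumption, not the driver's actual definition) of the
 * per-IOCTL availability check: the device is unusable for new requests
 * while it is disabled or while a reset is in progress.
 */
bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
{
	if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
		return true;

	return false;
}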


// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
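
/* Release a CB's DMA-coherent buffer and free its descriptor. */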
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	hdev->asic_funcs->dma_free_coherent(hdev, cb->size,
			(void *) (uintptr_t) cb->kernel_address,
			cb->bus_address);

	kfree(cb);
}
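
/* Return a pool-backed CB to the device's free pool; free a regular CB. */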
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	cb_do_release(hdev, cb);
}
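
/*
 * Allocate a CB descriptor and its DMA-coherent buffer. Kernel-context
 * (HL_KERNEL_ASID_ID) allocations must not sleep, hence GFP_ATOMIC; see the
 * comment inside the function.
 */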
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id)
{
	struct hl_cb *cb;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (ctx_id == HL_KERNEL_ASID_ID)
		p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
	else
		p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of dma memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = (u64) (uintptr_t) p;
	cb->size = cb_size;

	return cb;
}
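
/*
 * Create a CB. Kernel-context requests that fit the pool CB size reuse a CB
 * from the device pool; otherwise a new CB is allocated. The CB is then
 * registered in the manager's IDR and a page-shifted handle suitable for
 * mmap is returned in *handle.
 */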
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 cb_size, u64 *handle, int ctx_id)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc;

	/*
	 * Can't use the generic function to check this because of the special
	 * case where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > HL_MAX_CB_SIZE) {
		dev_err(hdev->dev,
			"CB size %d must be less than %d\n",
			cb_size, HL_MAX_CB_SIZE);
		rc = -EINVAL;
		goto out_err;
	}

	/* Minimum allocation must be PAGE SIZE */
	if (cb_size < PAGE_SIZE)
		cb_size = PAGE_SIZE;

	if (ctx_id == HL_KERNEL_ASID_ID &&
			cb_size <= hdev->asic_prop.cb_pool_cb_size) {

		spin_lock(&hdev->cb_pool_lock);
		if (!list_empty(&hdev->cb_pool)) {
			cb = list_first_entry(&hdev->cb_pool, typeof(*cb),
					pool_list);
			list_del(&cb->pool_list);
			spin_unlock(&hdev->cb_pool_lock);
			alloc_new_cb = false;
		} else {
			spin_unlock(&hdev->cb_pool_lock);
			dev_dbg(hdev->dev, "CB pool is empty\n");
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx_id = ctx_id;

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto release_cb;
	}

	cb->id = rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * idr is 32-bit so we can safely OR it with a mask that is above
	 * 32 bit
	 */
	*handle = cb->id | HL_MMAP_CB_MASK;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

release_cb:
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}
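
/* Remove the CB from the manager's IDR and drop the handle's reference. */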
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so shift it back to the
	 * value the idr module originally returned
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}
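
/*
 * CB IOCTL entry point. As described in the commit message above, the IOCTL
 * is rejected with -EBUSY while the device is disabled or in reset; otherwise
 * it dispatches the create/destroy operation.
 */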
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size,
					&handle, hpriv->ctx->asid);
		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;
	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}
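
/*
 * VMA close callback. Partial unmaps only shrink the tracked mmap size; once
 * the entire mapping is gone, the mmap flag is cleared and the reference
 * taken at mmap time is dropped.
 */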
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};
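
/*
 * Map a CB to user-space. The reference taken by hl_cb_get() is handed over
 * to vma->vm_private_data and released in cb_vm_close().
 */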
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	phys_addr_t address;
	u32 handle;
	int rc;

	handle = vma->vm_pgoff;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle %d\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB already mmaped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	/* Calculate address for CB */
	address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
					address, cb->size);

	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}
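
/*
 * Look up a CB by handle and take a reference on it. The caller is
 * responsible for the matching hl_cb_put().
 */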
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle %d\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}
void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}
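
/*
 * Tear down a CB manager: drop the handle reference of every remaining CB
 * and report any CB that is still in use, then destroy the IDR.
 */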
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx_id);
	}

	idr_destroy(&mgr->cb_handles);
}
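
/*
 * Convenience wrapper for driver-internal CBs: create a CB in the kernel CB
 * manager and return it with a reference already taken via hl_cb_get().
 */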
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
			HL_KERNEL_ASID_ID);
	if (rc) {
		dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
	if (!cb)
		goto destroy_cb;

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}
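
/*
 * Pre-allocate the pool of kernel CBs so that kernel-context hl_cb_create()
 * calls can take a CB from the pool instead of allocating one.
 */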
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}