mei: add wrapper for queuing control commands.

Enclose the boilerplate code of allocating a control/hbm command cb
and enqueueing it onto ctrl_wr.list in a convenient wrapper
mei_cl_enqueue_ctrl_wr_cb().

This is a preparatory patch for enabling consecutive reads.

Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author:    Tomas Winkler <tomas.winkler@intel.com>
Date:      2016-07-26 01:06:05 +03:00
Committer: Greg Kroah-Hartman
Commit:    3030dc0564
Parent:    4034b81ba3

6 changed files with 61 additions and 39 deletions
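
The shape of the refactor is the same at every call site. As a minimal
before/after sketch (condensed from the hunks below; error paths and
locking elided):

	/* before: each caller open-codes allocation plus queuing */
	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return -ENOMEM;
	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);

	/* after: one call allocates the cb and queues it on ctrl_wr.list */
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, fop_type, fp);
	if (!cb)
		return -ENOMEM;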

diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c

@@ -188,20 +188,19 @@ out:
  * mei_amthif_read_start - queue message for sending read credential
  *
  * @cl: host client
- * @file: file pointer of message recipient
+ * @fp: file pointer of message recipient
  *
  * Return: 0 on success, <0 on failure.
  */
-static int mei_amthif_read_start(struct mei_cl *cl, const struct file *file)
+static int mei_amthif_read_start(struct mei_cl *cl, const struct file *fp)
 {
 	struct mei_device *dev = cl->dev;
 	struct mei_cl_cb *cb;
 
-	cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, file);
+	cb = mei_cl_enqueue_ctrl_wr_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, fp);
 	if (!cb)
 		return -ENOMEM;
 
-	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 	cl->rx_flow_ctrl_creds++;
 
 	dev->iamthif_state = MEI_IAMTHIF_READING;

diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c

@@ -235,7 +235,7 @@ static void mei_cl_bus_event_work(struct work_struct *work)
 	/* Prepare for the next read */
 	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
 		mutex_lock(&bus->device_lock);
-		mei_cl_read_start(cldev->cl, 0, NULL);
+		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
 		mutex_unlock(&bus->device_lock);
 	}
 }

@@ -325,7 +325,7 @@ int mei_cldev_register_event_cb(struct mei_cl_device *cldev,
 	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
 		mutex_lock(&bus->device_lock);
-		ret = mei_cl_read_start(cldev->cl, 0, NULL);
+		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
 		mutex_unlock(&bus->device_lock);
 		if (ret && ret != -EBUSY)
 			return ret;
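
A note on the 0 -> mei_cl_mtu() changes above: with the new wrapper a zero
length means "allocate no data buffer", which is what the buffer-less
control commands (connect, disconnect, notify) want, so callers queuing a
read now have to ask for an explicit size. The wrapper still rounds any
non-zero request up to the client MTU; a condensed sketch of that policy,
per the client.c hunk below:

	/* length == 0: control command, the cb carries no data buffer */
	/* length  > 0: RX read, sized for at least one full message */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));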

diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c

@@ -358,7 +358,8 @@ void mei_io_cb_free(struct mei_cl_cb *cb)
  *
  * Return: mei_cl_cb pointer or NULL;
  */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
-				 const struct file *fp)
+static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
+					enum mei_cb_file_ops type,
+					const struct file *fp)
 {
 	struct mei_cl_cb *cb;
@@ -430,12 +431,12 @@ static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
  * Return: cb on success and NULL on failure
  */
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
-				  enum mei_cb_file_ops type,
+				  enum mei_cb_file_ops fop_type,
 				  const struct file *fp)
 {
 	struct mei_cl_cb *cb;
 
-	cb = mei_io_cb_init(cl, type, fp);
+	cb = mei_io_cb_init(cl, fop_type, fp);
 	if (!cb)
 		return NULL;
@@ -452,6 +453,36 @@ struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
 	return cb;
 }
 
+/**
+ * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
+ *     and enqueuing of the control commands cb
+ *
+ * @cl: host client
+ * @length: size of the buffer
+ * @fop_type: operation type
+ * @fp: associated file pointer (might be NULL)
+ *
+ * Return: cb on success and NULL on failure
+ * Locking: called under "dev->device_lock" lock
+ */
+struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
+					    enum mei_cb_file_ops fop_type,
+					    const struct file *fp)
+{
+	struct mei_cl_cb *cb;
+
+	/* for RX always allocate at least client's mtu */
+	if (length)
+		length = max_t(size_t, length, mei_cl_mtu(cl));
+
+	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
+	if (!cb)
+		return NULL;
+
+	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list.list);
+	return cb;
+}
+
 /**
  * mei_cl_read_cb - find this cl's callback in the read list
  * for a specific file
@@ -848,13 +879,11 @@ static int __mei_cl_disconnect(struct mei_cl *cl)
 
 	cl->state = MEI_FILE_DISCONNECTING;
 
-	cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
-	rets = cb ? 0 : -ENOMEM;
-	if (rets)
+	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
+	if (!cb) {
+		rets = -ENOMEM;
 		goto out;
-
-	cl_dbg(dev, cl, "add disconnect cb to control write list\n");
-	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+	}
 
 	if (mei_hbuf_acquire(dev)) {
 		rets = mei_cl_send_disconnect(cl, cb);
@@ -1023,14 +1052,14 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
  *
  * @cl: host client
  * @me_cl: me client
- * @file: pointer to file structure
+ * @fp: pointer to file structure
  *
  * Locking: called under "dev->device_lock" lock
  *
  * Return: 0 on success, <0 on failure.
  */
 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
-		   const struct file *file)
+		   const struct file *fp)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
@@ -1057,12 +1086,11 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
 		goto nortpm;
 	}
 
-	cb = mei_io_cb_init(cl, MEI_FOP_CONNECT, file);
-	rets = cb ? 0 : -ENOMEM;
-	if (rets)
+	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
+	if (!cb) {
+		rets = -ENOMEM;
 		goto out;
-
-	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+	}
 
 	/* run hbuf acquire last so we don't have to undo */
 	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
@@ -1265,7 +1293,7 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
  * mei_cl_notify_request - send notification stop/start request
  *
  * @cl: host client
- * @file: associate request with file
+ * @fp: associate request with file
  * @request: 1 for start or 0 for stop
  *
  * Locking: called under "dev->device_lock" lock

@@ -1273,7 +1301,7 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
  * Return: 0 on such and error otherwise.
  */
 int mei_cl_notify_request(struct mei_cl *cl,
-			  const struct file *file, u8 request)
+			  const struct file *fp, u8 request)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
@@ -1298,7 +1326,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
 	}
 
 	fop_type = mei_cl_notify_req2fop(request);
-	cb = mei_io_cb_init(cl, fop_type, file);
+	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
 	if (!cb) {
 		rets = -ENOMEM;
 		goto out;

@@ -1309,9 +1337,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
 			rets = -ENODEV;
 			goto out;
 		}
-		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
-	} else {
-		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+		list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
 	}
 
 	mutex_unlock(&dev->device_lock);
@@ -1443,14 +1469,10 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
 	if (cl->rx_flow_ctrl_creds)
 		return -EBUSY;
 
-	/* always allocate at least client max message */
-	length = max_t(size_t, length, mei_cl_mtu(cl));
-	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_READ, fp);
+	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
 	if (!cb)
 		return -ENOMEM;
 
-	list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-
 	rets = pm_runtime_get(dev->dev);
 	if (rets < 0 && rets != -EINPROGRESS) {
 		pm_runtime_put_noidle(dev->dev);
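
With the wrapper in place, queuing a control-type operation is a single
call. A hypothetical new caller might look as follows (MEI_FOP_FOO and
mei_cl_send_foo() are illustrative names, not part of this patch). Note
that the cb is already linked on ctrl_wr.list on return, which is why the
notify path above switches to list_move_tail() when it needs the cb on the
read list instead:

	/* queue a buffer-less control command (sketch) */
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_FOO, NULL);
	if (!cb)
		return -ENOMEM;

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_foo(cl, cb);	/* illustrative sender */
		if (rets) {
			list_del_init(&cb->list);	/* unlink on failure */
			goto out;
		}
	}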

diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h

@@ -82,8 +82,6 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
 /*
  * MEI IO Functions
  */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, enum mei_cb_file_ops type,
-				 const struct file *fp);
 void mei_io_cb_free(struct mei_cl_cb *priv_cb);
 
 /**

@@ -116,6 +114,9 @@ void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
 				  enum mei_cb_file_ops type,
 				  const struct file *fp);
+struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
+					    enum mei_cb_file_ops type,
+					    const struct file *fp);
 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp);
 
 /*

diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c

@@ -871,10 +871,10 @@ static int mei_hbm_fw_disconnect_req(struct mei_device *dev,
 		cl->state = MEI_FILE_DISCONNECTING;
 		cl->timer_count = 0;
 
-		cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT_RSP, NULL);
+		cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT_RSP,
+					       NULL);
 		if (!cb)
 			return -ENOMEM;
-		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
 	}
 	return 0;
 }

diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c

@@ -628,7 +628,7 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
 		if (!list_empty(&cl->rd_completed))
 			mask |= POLLIN | POLLRDNORM;
 		else
-			mei_cl_read_start(cl, 0, file);
+			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
 	}
 
 out: