greybus: operation: fix connection tear down
Fix connection tear down, which was done in an unsafe way that could
result in use-after-free as the per-connection list of operations was
iterated without any locking or refcounting.

Specifically, the operations list was iterated without holding any locks
or operation refcounts even though operations were being both removed
from the list and deallocated during per-operation cancellation. Any
operation completing during tear down could also cause corruption.

Change the per-connection operation list to only contain active
operations and use the recently introduced active counter to maintain
the list.

Add a new helper that is called on connection tear down to cancel all
outstanding operations in a safe way by using proper locks and making
sure to hold a reference to any operation being cancelled.

Note that by verifying the connection state before incrementing the
active count we can make sure that all active operations have been
cancelled and that no new ones have been started when the helper
returns.

Signed-off-by: Johan Hovold <johan@hovoldconsulting.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
parent cad09a8f8c
commit 008974cb52
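Before the diff itself, a minimal, self-contained userspace sketch of the two
patterns the commit message describes may help: activation fails once the
connection is no longer enabled, and tear down drains the active list while
holding a reference to each operation it cancels. The sketch uses a pthread
mutex in place of the connection spinlock and a plain counter in place of a
kref; all names here (conn, op, op_get_active, conn_cancel_operations) are
hypothetical rather than greybus APIs, and each in-flight operation is assumed
to hold exactly one active count.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct op;

struct conn {
	pthread_mutex_t lock;
	bool enabled;		/* cleared when tear down begins */
	struct op *ops;		/* singly linked list of active operations */
};

struct op {
	struct conn *conn;
	int refcount;		/* simplified stand-in for a kref */
	int active;		/* on conn->ops while non-zero */
	int status;		/* result recorded on cancellation */
	struct op *next;
};

/* Take a reference; the caller must already hold conn->lock. */
static void op_get_locked(struct op *op)
{
	op->refcount++;
}

static void op_put(struct op *op)
{
	struct conn *conn = op->conn;
	bool last;

	pthread_mutex_lock(&conn->lock);
	last = (--op->refcount == 0);
	pthread_mutex_unlock(&conn->lock);
	if (last)
		free(op);
}

/* Activation fails once the connection is disabled, so no new operation
 * can appear after tear down has started. */
static int op_get_active(struct op *op)
{
	struct conn *conn = op->conn;

	pthread_mutex_lock(&conn->lock);
	if (!conn->enabled) {
		pthread_mutex_unlock(&conn->lock);
		return -1;		/* -ENOTCONN in the driver */
	}
	if (op->active++ == 0) {	/* first activation: enlist */
		op->next = conn->ops;
		conn->ops = op;
	}
	pthread_mutex_unlock(&conn->lock);
	return 0;
}

static void op_put_active(struct op *op)
{
	struct conn *conn = op->conn;
	struct op **pp;

	pthread_mutex_lock(&conn->lock);
	if (--op->active == 0) {	/* last deactivation: unlink */
		for (pp = &conn->ops; *pp != op; pp = &(*pp)->next)
			;
		*pp = op->next;
	}
	pthread_mutex_unlock(&conn->lock);
}

/* Stand-in for gb_operation_cancel(): completing drops the active count. */
static void op_cancel(struct op *op, int status)
{
	op->status = status;
	op_put_active(op);
}

/* Drain the active list the way the new helper in the diff does: take a
 * reference under the lock, drop the lock to cancel (cancellation takes
 * the operation off the list itself), then retake the lock and re-check. */
static void conn_cancel_operations(struct conn *conn, int status)
{
	pthread_mutex_lock(&conn->lock);
	while (conn->ops) {
		struct op *op = conn->ops;

		op_get_locked(op);	/* keep op alive across the unlock */
		pthread_mutex_unlock(&conn->lock);

		op_cancel(op, status);
		op_put(op);

		pthread_mutex_lock(&conn->lock);
	}
	pthread_mutex_unlock(&conn->lock);
}

Tear down would first clear conn->enabled under the lock and only then call
conn_cancel_operations(), so once the helper returns no operation is active
and none can become active, which is the guarantee gb_connection_destroy()
relies on in the diff below.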
drivers/staging/greybus/connection.c
@@ -221,23 +221,46 @@ struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
 	return connection;
 }
 
+/*
+ * Cancel all active operations on a connection.
+ *
+ * Should only be called during connection tear down.
+ */
+static void gb_connection_cancel_operations(struct gb_connection *connection,
+						int errno)
+{
+	struct gb_operation *operation;
+
+	spin_lock_irq(&connection->lock);
+
+	WARN_ON(!list_empty(&connection->operations));
+
+	while (!list_empty(&connection->operations)) {
+		operation = list_last_entry(&connection->operations,
+						struct gb_operation, links);
+		gb_operation_get(operation);
+		spin_unlock_irq(&connection->lock);
+
+		gb_operation_cancel(operation, errno);
+		gb_operation_put(operation);
+
+		spin_lock_irq(&connection->lock);
+	}
+	spin_unlock_irq(&connection->lock);
+}
+
 /*
  * Tear down a previously set up connection.
  */
 void gb_connection_destroy(struct gb_connection *connection)
 {
-	struct gb_operation *operation;
-	struct gb_operation *next;
 	struct ida *id_map;
 
	if (WARN_ON(!connection))
 		return;
 
-	if (WARN_ON(!list_empty(&connection->operations))) {
-		list_for_each_entry_safe(operation, next,
-					 &connection->operations, links)
-			gb_operation_cancel(operation, -ESHUTDOWN);
-	}
+	gb_connection_cancel_operations(connection, -ESHUTDOWN);
 
 	spin_lock_irq(&gb_connections_lock);
 	list_del(&connection->bundle_links);
 	list_del(&connection->hd_links);
drivers/staging/greybus/connection.h
@@ -37,9 +37,9 @@ struct gb_connection {
 
 	spinlock_t lock;
 	enum gb_connection_state state;
-	struct list_head operations;
 
 	atomic_t op_cycle;
+	struct list_head operations;
 
 	void *private;
 };
drivers/staging/greybus/operation.c
@@ -29,32 +29,65 @@ static struct workqueue_struct *gb_operation_workqueue;
 static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
 
 /*
- * Protects access to connection operations lists, as well as
- * updates to operation->errno.
+ * Protects updates to operation->errno.
  */
 static DEFINE_SPINLOCK(gb_operations_lock);
 
 static int gb_operation_response_send(struct gb_operation *operation,
					int errno);
 
-/* Caller holds operation reference. */
-static inline void gb_operation_get_active(struct gb_operation *operation)
+/*
+ * Increment operation active count and add to connection list unless the
+ * connection is going away.
+ *
+ * Caller holds operation reference.
+ */
+static int gb_operation_get_active(struct gb_operation *operation)
 {
-	atomic_inc(&operation->active);
+	struct gb_connection *connection = operation->connection;
+	unsigned long flags;
+
+	spin_lock_irqsave(&connection->lock, flags);
+
+	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
+		spin_unlock_irqrestore(&connection->lock, flags);
+		return -ENOTCONN;
+	}
+
+	if (operation->active++ == 0)
+		list_add_tail(&operation->links, &connection->operations);
+
+	spin_unlock_irqrestore(&connection->lock, flags);
+
+	return 0;
 }
 
 /* Caller holds operation reference. */
-static inline void gb_operation_put_active(struct gb_operation *operation)
+static void gb_operation_put_active(struct gb_operation *operation)
 {
-	if (atomic_dec_and_test(&operation->active)) {
+	struct gb_connection *connection = operation->connection;
+	unsigned long flags;
+
+	spin_lock_irqsave(&connection->lock, flags);
+	if (--operation->active == 0) {
+		list_del(&operation->links);
 		if (atomic_read(&operation->waiters))
 			wake_up(&gb_operation_cancellation_queue);
 	}
+	spin_unlock_irqrestore(&connection->lock, flags);
 }
 
-static inline bool gb_operation_is_active(struct gb_operation *operation)
+static bool gb_operation_is_active(struct gb_operation *operation)
 {
-	return atomic_read(&operation->active);
+	struct gb_connection *connection = operation->connection;
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&connection->lock, flags);
+	ret = operation->active;
+	spin_unlock_irqrestore(&connection->lock, flags);
+
+	return ret;
 }
 
 /*
@@ -150,7 +183,7 @@ gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
 	unsigned long flags;
 	bool found = false;
 
-	spin_lock_irqsave(&gb_operations_lock, flags);
+	spin_lock_irqsave(&connection->lock, flags);
 	list_for_each_entry(operation, &connection->operations, links)
 		if (operation->id == operation_id &&
		    !gb_operation_is_incoming(operation)) {
@@ -158,7 +191,7 @@ gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
 			found = true;
 			break;
 		}
-	spin_unlock_irqrestore(&gb_operations_lock, flags);
+	spin_unlock_irqrestore(&connection->lock, flags);
 
 	return found ? operation : NULL;
 }
@@ -453,7 +486,6 @@ gb_operation_create_common(struct gb_connection *connection, u8 type,
 {
 	struct greybus_host_device *hd = connection->hd;
 	struct gb_operation *operation;
-	unsigned long flags;
 
 	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
 	if (!operation)
@@ -479,13 +511,8 @@ gb_operation_create_common(struct gb_connection *connection, u8 type,
 	INIT_WORK(&operation->work, gb_operation_work);
 	init_completion(&operation->completion);
 	kref_init(&operation->kref);
-	atomic_set(&operation->active, 0);
 	atomic_set(&operation->waiters, 0);
 
-	spin_lock_irqsave(&gb_operations_lock, flags);
-	list_add_tail(&operation->links, &connection->operations);
-	spin_unlock_irqrestore(&gb_operations_lock, flags);
-
 	return operation;
 
 err_request:
@@ -570,10 +597,6 @@ static void _gb_operation_destroy(struct kref *kref)
 
 	operation = container_of(kref, struct gb_operation, kref);
 
-	/* XXX Make sure it's not in flight */
-	list_del(&operation->links);
-	spin_unlock(&gb_operations_lock);
-
 	if (operation->response)
 		gb_operation_message_free(operation->response);
 	gb_operation_message_free(operation->request);
@@ -590,8 +613,7 @@ void gb_operation_put(struct gb_operation *operation)
 	if (WARN_ON(!operation))
 		return;
 
-	kref_put_spinlock_irqsave(&operation->kref, _gb_operation_destroy,
-				  &gb_operations_lock);
+	kref_put(&operation->kref, _gb_operation_destroy);
 }
 EXPORT_SYMBOL_GPL(gb_operation_put);
 
@@ -621,15 +643,14 @@ int gb_operation_request_send(struct gb_operation *operation,
 	if (!callback)
 		return -EINVAL;
 
-	if (connection->state != GB_CONNECTION_STATE_ENABLED)
-		return -ENOTCONN;
-
 	/*
 	 * First, get an extra reference on the operation.
 	 * It'll be dropped when the operation completes.
 	 */
 	gb_operation_get(operation);
-	gb_operation_get_active(operation);
+	ret = gb_operation_get_active(operation);
+	if (ret)
+		goto err_put;
 
 	/*
 	 * Record the callback function, which is executed in
@@ -651,10 +672,15 @@ int gb_operation_request_send(struct gb_operation *operation,
 	gb_operation_result_set(operation, -EINPROGRESS);
 
 	ret = gb_message_send(operation->request, gfp);
-	if (ret) {
-		gb_operation_put_active(operation);
-		gb_operation_put(operation);
-	}
+	if (ret)
+		goto err_put_active;
+
+	return 0;
+
+err_put_active:
+	gb_operation_put_active(operation);
+err_put:
+	gb_operation_put(operation);
 
 	return ret;
 }
@@ -705,9 +731,6 @@ static int gb_operation_response_send(struct gb_operation *operation,
 	struct gb_connection *connection = operation->connection;
 	int ret;
 
-	if (connection->state != GB_CONNECTION_STATE_ENABLED)
-		return -ENOTCONN;
-
 	if (!operation->response &&
	    !gb_operation_is_unidirectional(operation)) {
 		if (!gb_operation_response_alloc(operation, 0))
@@ -726,16 +749,23 @@ static int gb_operation_response_send(struct gb_operation *operation,
 
 	/* Reference will be dropped when message has been sent. */
 	gb_operation_get(operation);
-	gb_operation_get_active(operation);
+	ret = gb_operation_get_active(operation);
+	if (ret)
+		goto err_put;
 
 	/* Fill in the response header and send it */
 	operation->response->header->result = gb_operation_errno_map(errno);
 
 	ret = gb_message_send(operation->response, GFP_KERNEL);
-	if (ret) {
-		gb_operation_put_active(operation);
-		gb_operation_put(operation);
-	}
+	if (ret)
+		goto err_put_active;
+
+	return 0;
+
+err_put_active:
+	gb_operation_put_active(operation);
+err_put:
+	gb_operation_put(operation);
 
 	return ret;
 }
@@ -785,6 +815,7 @@ static void gb_connection_recv_request(struct gb_connection *connection,
				       void *data, size_t size)
 {
 	struct gb_operation *operation;
+	int ret;
 
 	operation = gb_operation_create_incoming(connection, operation_id,
						 type, data, size);
@@ -793,7 +824,11 @@ static void gb_connection_recv_request(struct gb_connection *connection,
 		return;	/* XXX Respond with pre-allocated ENOMEM */
 	}
 
-	gb_operation_get_active(operation);
+	ret = gb_operation_get_active(operation);
+	if (ret) {
+		gb_operation_put(operation);
+		return;
+	}
 
 	/*
	 * The initial reference to the operation will be dropped when the
drivers/staging/greybus/operation.h
@@ -127,9 +127,9 @@ struct gb_operation {
 	struct completion completion;
 
 	struct kref kref;
-	atomic_t active;
 	atomic_t waiters;
 
+	int active;
 	struct list_head links;	/* connection->operations */
 };
 