drm/nouveau/flcn/qmgr: move sequence tracking from nvkm_msgqueue to nvkm_falcon_qmgr
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent 22431189d6
commit 0ae59432ba
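What the change does: the per-command sequence slots (ID, state, callback, completion), the seq_lock and the in-use bitmap move out of struct nvkm_msgqueue and into struct nvkm_falcon_qmgr, and the acquire/release helpers are renamed to match. The sketch below is a minimal, self-contained userspace model of that acquire/release pattern, not the kernel code; the struct layout, the pthread mutex standing in for the kernel mutex, and the plain bool array standing in for the bitmap are illustrative assumptions.

/*
 * Userspace sketch (NOT the kernel code) of the sequence-tracking scheme
 * this commit centralizes in the queue manager: a fixed pool of sequence
 * slots, acquired under a lock, released by marking the slot free again.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_SEQUENCES 16

enum seq_state { SEQ_FREE = 0, SEQ_PENDING, SEQ_USED, SEQ_CANCELLED };

struct seq {
        unsigned short id;
        enum seq_state state;
};

struct qmgr {
        pthread_mutex_t seq_lock;
        struct seq seq[NUM_SEQUENCES];
        bool seq_used[NUM_SEQUENCES];   /* stands in for the kernel bitmap */
};

static struct seq *seq_acquire(struct qmgr *qmgr)
{
        struct seq *seq = NULL;
        int i;

        pthread_mutex_lock(&qmgr->seq_lock);
        for (i = 0; i < NUM_SEQUENCES; i++) {
                if (!qmgr->seq_used[i]) {
                        qmgr->seq_used[i] = true;
                        seq = &qmgr->seq[i];
                        seq->state = SEQ_PENDING;
                        break;
                }
        }
        pthread_mutex_unlock(&qmgr->seq_lock);
        return seq;                      /* NULL if the pool is exhausted */
}

static void seq_release(struct qmgr *qmgr, struct seq *seq)
{
        seq->state = SEQ_FREE;
        pthread_mutex_lock(&qmgr->seq_lock);   /* kernel uses atomic clear_bit */
        qmgr->seq_used[seq->id] = false;
        pthread_mutex_unlock(&qmgr->seq_lock);
}

int main(void)
{
        struct qmgr qmgr = { .seq_lock = PTHREAD_MUTEX_INITIALIZER };
        int i;

        for (i = 0; i < NUM_SEQUENCES; i++)
                qmgr.seq[i].id = i;

        struct seq *seq = seq_acquire(&qmgr);
        if (seq) {
                printf("acquired sequence %d\n", seq->id);
                seq_release(&qmgr, seq);
        }
        return 0;
}

In the kernel version the release path only needs an atomic clear_bit(), so it skips seq_lock entirely, which is what the comment retained in qmgr.c in the diff below refers to.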
@@ -164,7 +164,7 @@ nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
 	if (IS_ERR(queue))
 		return PTR_ERR(queue);
 
-	seq = msgqueue_seq_acquire(priv);
+	seq = nvkm_falcon_qmgr_seq_acquire(queue->qmgr);
 	if (IS_ERR(seq))
 		return PTR_ERR(seq);
 
@@ -178,7 +178,7 @@ nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
 	ret = cmd_write(priv, cmd, queue);
 	if (ret) {
 		seq->state = SEQ_STATE_PENDING;
-		msgqueue_seq_release(priv, seq);
+		nvkm_falcon_qmgr_seq_release(queue->qmgr, seq);
 	}
 
 	return ret;
@@ -134,12 +134,14 @@ close:
 }
 
 static int
-msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
+msgqueue_msg_handle(struct nvkm_msgqueue *priv,
+		    struct nvkm_falcon_msgq *msgq,
+		    struct nvkm_msgqueue_hdr *hdr)
 {
 	const struct nvkm_subdev *subdev = priv->falcon->owner;
 	struct nvkm_msgqueue_seq *seq;
 
-	seq = &priv->seq[hdr->seq_id];
+	seq = &msgq->qmgr->seq[hdr->seq_id];
 	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
 		nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
 		return -EINVAL;
@@ -153,7 +155,7 @@ msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
 	if (seq->completion)
 		complete(seq->completion);
 
-	msgqueue_seq_release(priv, seq);
+	nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
 	return 0;
 }
 
@@ -211,7 +213,7 @@ nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
 		priv->init_msg_received = true;
 	} else {
 		while (msg_queue_read(priv, queue, hdr) > 0)
-			msgqueue_msg_handle(priv, hdr);
+			msgqueue_msg_handle(priv, queue, hdr);
 	}
 }
 
@@ -136,13 +136,8 @@ nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
 		   struct nvkm_falcon *falcon,
 		   struct nvkm_msgqueue *queue)
 {
-	int i;
-
 	queue->func = func;
 	queue->falcon = falcon;
-	mutex_init(&queue->seq_lock);
-	for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
-		queue->seq[i].id = i;
 
 	init_completion(&queue->init_done);
 }
@@ -144,36 +144,6 @@ struct nvkm_msgqueue_queue {
 	u32 tail_reg;
 };
 
-/**
- * struct nvkm_msgqueue_seq - keep track of ongoing commands
- *
- * Every time a command is sent, a sequence is assigned to it so the
- * corresponding message can be matched. Upon receiving the message, a callback
- * can be called and/or a completion signaled.
- *
- * @id: sequence ID
- * @state: current state
- * @callback: callback to call upon receiving matching message
- * @completion: completion to signal after callback is called
- */
-struct nvkm_msgqueue_seq {
-	u16 id;
-	enum {
-		SEQ_STATE_FREE = 0,
-		SEQ_STATE_PENDING,
-		SEQ_STATE_USED,
-		SEQ_STATE_CANCELLED
-	} state;
-	nvkm_msgqueue_callback callback;
-	struct completion *completion;
-};
-
-/*
- * We can have an arbitrary number of sequences, but realistically we will
- * probably not use that much simultaneously.
- */
-#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
-
 /**
  * struct nvkm_msgqueue - manage a command/message based FW on a falcon
 *
@@ -181,9 +151,6 @@ struct nvkm_msgqueue_seq {
  * @func: implementation of the firmware to use
  * @init_msg_received: whether the init message has already been received
  * @init_done: whether all init is complete and commands can be processed
- * @seq_lock: protects seq and seq_tbl
- * @seq: sequences to match commands and messages
- * @seq_tbl: bitmap of sequences currently in use
  */
 struct nvkm_msgqueue {
 	struct nvkm_falcon *falcon;
@@ -191,10 +158,6 @@ struct nvkm_msgqueue {
 	u32 fw_version;
 	bool init_msg_received;
 	struct completion init_done;
-
-	struct mutex seq_lock;
-	struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
-	unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
 };
 
 void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
@@ -23,7 +23,7 @@
 #include "qmgr.h"
 
 struct nvkm_msgqueue_seq *
-msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *priv)
 {
 	const struct nvkm_subdev *subdev = priv->falcon->owner;
 	struct nvkm_msgqueue_seq *seq;
@@ -46,7 +46,8 @@ msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
 }
 
 void
-msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *priv,
+			     struct nvkm_msgqueue_seq *seq)
 {
 	/* no need to acquire seq_lock since clear_bit is atomic */
 	seq->state = SEQ_STATE_FREE;
@@ -70,10 +71,15 @@ nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
 		     struct nvkm_falcon_qmgr **pqmgr)
 {
 	struct nvkm_falcon_qmgr *qmgr;
+	int i;
 
 	if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
 		return -ENOMEM;
 
 	qmgr->falcon = falcon;
+	mutex_init(&qmgr->seq_lock);
+	for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
+		qmgr->seq[i].id = i;
+
 	return 0;
 }
@@ -9,12 +9,48 @@
 /* max size of the messages we can receive */
 #define MSG_BUF_SIZE 128
 
-struct nvkm_falcon_qmgr {
-	struct nvkm_falcon *falcon;
+/**
+ * struct nvkm_msgqueue_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id: sequence ID
+ * @state: current state
+ * @callback: callback to call upon receiving matching message
+ * @completion: completion to signal after callback is called
+ */
+struct nvkm_msgqueue_seq {
+	u16 id;
+	enum {
+		SEQ_STATE_FREE = 0,
+		SEQ_STATE_PENDING,
+		SEQ_STATE_USED,
+		SEQ_STATE_CANCELLED
+	} state;
+	nvkm_msgqueue_callback callback;
+	struct completion *completion;
 };
 
-struct nvkm_msgqueue_seq *msgqueue_seq_acquire(struct nvkm_msgqueue *);
-void msgqueue_seq_release(struct nvkm_msgqueue *, struct nvkm_msgqueue_seq *);
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that much simultaneously.
+ */
+#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
+
+struct nvkm_falcon_qmgr {
+	struct nvkm_falcon *falcon;
+
+	struct mutex seq_lock;
+	struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
+	unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
+};
+
+struct nvkm_msgqueue_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
+void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
+				  struct nvkm_msgqueue_seq *);
 
 #define FLCNQ_PRINTK(t,q,f,a...) \
 	FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)
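On the receive side, msgqueue_msg_handle() now resolves the sequence through msgq->qmgr->seq[hdr->seq_id], runs the callback, signals the completion, and releases the slot. The following userspace sketch of that lookup-and-complete flow is illustrative only; the types and the bool standing in for struct completion are assumptions, not the kernel API.

/*
 * Userspace sketch of matching a received message back to its sequence
 * via the header's seq_id: validate the slot state, run the callback,
 * signal completion, then free the slot for reuse.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_SEQUENCES 16

enum seq_state { SEQ_FREE = 0, SEQ_PENDING, SEQ_USED, SEQ_CANCELLED };

struct msg_hdr { unsigned char seq_id; };

struct seq {
        unsigned short id;
        enum seq_state state;
        void (*callback)(struct seq *, struct msg_hdr *);
        bool *completion;               /* stands in for struct completion * */
};

struct qmgr { struct seq seq[NUM_SEQUENCES]; };

static int msg_handle(struct qmgr *qmgr, struct msg_hdr *hdr)
{
        struct seq *seq = &qmgr->seq[hdr->seq_id];

        if (seq->state != SEQ_USED && seq->state != SEQ_CANCELLED) {
                fprintf(stderr, "msg for unknown sequence %d\n", seq->id);
                return -1;
        }

        if (seq->state == SEQ_USED && seq->callback)
                seq->callback(seq, hdr);  /* cancelled commands skip the callback */
        if (seq->completion)
                *seq->completion = true;  /* kernel: complete(seq->completion) */

        seq->state = SEQ_FREE;            /* release the slot for reuse */
        return 0;
}

static void on_done(struct seq *seq, struct msg_hdr *hdr)
{
        printf("sequence %d answered (seq_id %d)\n", seq->id, hdr->seq_id);
}

int main(void)
{
        struct qmgr qmgr = { 0 };
        bool done = false;

        qmgr.seq[3] = (struct seq){ .id = 3, .state = SEQ_USED,
                                    .callback = on_done, .completion = &done };

        struct msg_hdr hdr = { .seq_id = 3 };
        msg_handle(&qmgr, &hdr);
        printf("completed: %s\n", done ? "yes" : "no");
        return 0;
}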