bus: mhi: core: Sanity check values from remote device before use
When parsing the structures in the shared memory, there are values which come from the remote device. For example, a transfer completion event will have a pointer to the TRE in the relevant channel's transfer ring. As another example, event ring elements may specify a channel in which the event occurred; however, the specified channel value may not be valid, as no channel is defined at that index even though the index may be less than the maximum allowed index.

Such values should be considered untrusted and validated before use. If we blindly use such values, we may access invalid data or crash if the values are corrupted. If validation fails, drop the relevant event.

Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Reviewed-by: Hemant Kumar <hemantk@codeaurora.org>
Link: https://lore.kernel.org/r/1615411855-15053-1-git-send-email-jhugo@codeaurora.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
commit ec32332df7
parent 11134390d7
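Before the diff itself, here is a minimal, self-contained userspace sketch of the validation pattern the patch introduces: a device-supplied ring pointer is bounds-checked against the ring's DMA-mapped window before it is translated to a host virtual address and used. The struct and function names (demo_ring, demo_ring_ptr_is_valid, demo_ring_to_virtual) are illustrative stand-ins, not the MHI driver's actual definitions; only the check itself mirrors is_valid_ring_ptr() from the diff below.

/*
 * Illustrative sketch only -- not kernel code. Models bounds-checking a
 * device-reported ring pointer before converting it to a host pointer.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint64_t iommu_base;	/* ring base address as seen by the device (DMA/bus address) */
	size_t len;		/* ring size in bytes */
	void *base;		/* host virtual address of the same memory */
};

/* Mirrors the is_valid_ring_ptr() check: addr must lie in [iommu_base, iommu_base + len) */
static bool demo_ring_ptr_is_valid(const struct demo_ring *ring, uint64_t addr)
{
	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
}

/* Translate a *validated* device address into a host virtual pointer */
static void *demo_ring_to_virtual(const struct demo_ring *ring, uint64_t addr)
{
	return (char *)ring->base + (addr - ring->iommu_base);
}

int main(void)
{
	static uint8_t backing[4096];
	struct demo_ring ring = {
		.iommu_base = 0x80000000ULL,	/* pretend DMA mapping */
		.len = sizeof(backing),
		.base = backing,
	};
	uint64_t good_rp = ring.iommu_base + 128;		/* well-formed read pointer */
	uint64_t bad_rp = ring.iommu_base + ring.len + 64;	/* corrupted: outside the ring */

	if (demo_ring_ptr_is_valid(&ring, good_rp))
		printf("good_rp -> host pointer %p\n",
		       demo_ring_to_virtual(&ring, good_rp));

	if (!demo_ring_ptr_is_valid(&ring, bad_rp))
		printf("bad_rp rejected: points outside of the ring, event dropped\n");

	return 0;
}

The patch applies this same kind of check wherever er_ctxt->rp or an event element's TRE pointer crosses from device-writable context memory into driver code, as the hunks below show.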
@@ -265,6 +265,11 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
 	smp_wmb();
 }
 
+static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
+{
+	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
+}
+
 int mhi_destroy_device(struct device *dev, void *data)
 {
 	struct mhi_chan *ul_chan, *dl_chan;
@@ -427,7 +432,16 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
 	struct mhi_event_ctxt *er_ctxt =
 		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
 	struct mhi_ring *ev_ring = &mhi_event->ring;
-	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	dma_addr_t ptr = er_ctxt->rp;
+	void *dev_rp;
+
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return IRQ_HANDLED;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 
 	/* Only proceed if event ring has pending events */
 	if (ev_ring->rp == dev_rp)
@@ -583,6 +597,11 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
 		struct mhi_buf_info *buf_info;
 		u16 xfer_len;
 
+		if (!is_valid_ring_ptr(tre_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event element points outside of the tre ring\n");
+			break;
+		}
 		/* Get the TRB this event points to */
 		ev_tre = mhi_to_virtual(tre_ring, ptr);
 
@@ -745,6 +764,12 @@ static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
 	struct mhi_chan *mhi_chan;
 	u32 chan;
 
+	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event element points outside of the cmd ring\n");
+		return;
+	}
+
 	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
 
 	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
@@ -769,6 +794,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	u32 chan;
 	int count = 0;
+	dma_addr_t ptr = er_ctxt->rp;
 
 	/*
 	 * This is a quick check to avoid unnecessary event processing
@@ -778,7 +804,13 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 	local_rp = ev_ring->rp;
 
 	while (dev_rp != local_rp) {
@@ -887,6 +919,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 			 */
 			if (chan < mhi_cntrl->max_chan) {
 				mhi_chan = &mhi_cntrl->mhi_chan[chan];
+				if (!mhi_chan->configured)
+					break;
 				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
 				event_quota--;
 			}
@@ -898,7 +932,15 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
 
 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 		local_rp = ev_ring->rp;
-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+		ptr = er_ctxt->rp;
+		if (!is_valid_ring_ptr(ev_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event ring rp points outside of the event ring\n");
+			return -EIO;
+		}
+
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
 		count++;
 	}
 
@@ -921,11 +963,18 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 	int count = 0;
 	u32 chan;
 	struct mhi_chan *mhi_chan;
+	dma_addr_t ptr = er_ctxt->rp;
 
 	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
 		return -EIO;
 
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		return -EIO;
+	}
+
+	dev_rp = mhi_to_virtual(ev_ring, ptr);
 	local_rp = ev_ring->rp;
 
 	while (dev_rp != local_rp && event_quota > 0) {
@@ -939,7 +988,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 		 * Only process the event ring elements whose channel
 		 * ID is within the maximum supported range.
 		 */
-		if (chan < mhi_cntrl->max_chan) {
+		if (chan < mhi_cntrl->max_chan &&
+		    mhi_cntrl->mhi_chan[chan].configured) {
 			mhi_chan = &mhi_cntrl->mhi_chan[chan];
 
 			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
@@ -953,7 +1003,15 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
 
 		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
 		local_rp = ev_ring->rp;
-		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+		ptr = er_ctxt->rp;
+		if (!is_valid_ring_ptr(ev_ring, ptr)) {
+			dev_err(&mhi_cntrl->mhi_dev->dev,
+				"Event ring rp points outside of the event ring\n");
+			return -EIO;
+		}
+
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
 		count++;
 	}
 	read_lock_bh(&mhi_cntrl->pm_lock);
@@ -1455,6 +1513,7 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
 	struct mhi_ring *ev_ring;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	unsigned long flags;
+	dma_addr_t ptr;
 
 	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
 
@@ -1462,7 +1521,15 @@ static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
 
 	/* mark all stale events related to channel as STALE event */
 	spin_lock_irqsave(&mhi_event->lock, flags);
-	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+	ptr = er_ctxt->rp;
+	if (!is_valid_ring_ptr(ev_ring, ptr)) {
+		dev_err(&mhi_cntrl->mhi_dev->dev,
+			"Event ring rp points outside of the event ring\n");
+		dev_rp = ev_ring->rp;
+	} else {
+		dev_rp = mhi_to_virtual(ev_ring, ptr);
+	}
 
 	local_rp = ev_ring->rp;
 	while (dev_rp != local_rp) {