can: rx-offload: Prepare for CAN FD support
The skbs for classic CAN and CAN FD frames are allocated with separate functions: alloc_can_skb() and alloc_canfd_skb().

In order to support CAN FD frames via the rx-offload helper, the driver itself has to allocate the skb (depending on whether it received a classic CAN or a CAN FD frame), as the rx-offload helper cannot know which kind of CAN frame the driver has received.

This patch moves the allocation of the skb into the struct can_rx_offload::mailbox_read callbacks of the flexcan and ti_hecc drivers and adjusts the rx-offload helper accordingly.

Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
commit 4e9c9484b0
parent 61d2350615
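Editorial note: the new callback shape is what later allows a driver to choose between alloc_can_skb() and alloc_canfd_skb() per received frame. The sketch below is illustrative only and is not part of this patch; the hw_*() helpers are hypothetical placeholders for device-specific register access.

/* Illustrative sketch only (not from this patch): a mailbox_read()
 * callback for a hypothetical CAN FD capable controller. All hw_*()
 * helpers are made-up placeholders.
 */
static struct sk_buff *hypothetical_mailbox_read(struct can_rx_offload *offload,
						 unsigned int n, u32 *timestamp,
						 bool drop)
{
	struct sk_buff *skb;

	/* rx-offload queue is full: read the mailbox to free it,
	 * but discard the data.
	 */
	if (unlikely(drop)) {
		skb = ERR_PTR(-ENOBUFS);
		goto mark_as_read;
	}

	/* The driver knows which frame type it received, so it can
	 * pick the matching allocator - this is exactly what the
	 * rx-offload helper cannot do on its own.
	 */
	if (hw_rx_is_fd(offload->dev, n)) {
		struct canfd_frame *cfd;

		skb = alloc_canfd_skb(offload->dev, &cfd);
		if (skb)
			hw_read_fd_frame(offload->dev, n, cfd, timestamp);
	} else {
		struct can_frame *cf;

		skb = alloc_can_skb(offload->dev, &cf);
		if (skb)
			hw_read_frame(offload->dev, n, cf, timestamp);
	}
	if (!skb)
		skb = ERR_PTR(-ENOMEM);

 mark_as_read:
	hw_release_mailbox(offload->dev, n);
	return skb;
}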
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -783,16 +783,23 @@ static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
 	return container_of(offload, struct flexcan_priv, offload);
 }
 
-static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
-					 struct can_frame *cf,
-					 u32 *timestamp, unsigned int n)
+static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
+					    unsigned int n, u32 *timestamp,
+					    bool drop)
 {
 	struct flexcan_priv *priv = rx_offload_to_priv(offload);
 	struct flexcan_regs __iomem *regs = priv->regs;
 	struct flexcan_mb __iomem *mb;
+	struct sk_buff *skb;
+	struct can_frame *cf;
 	u32 reg_ctrl, reg_id, reg_iflag1;
 	int i;
 
+	if (unlikely(drop)) {
+		skb = ERR_PTR(-ENOBUFS);
+		goto mark_as_read;
+	}
+
 	mb = flexcan_get_mb(priv, n);
 
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -806,7 +813,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 		code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
 		if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
 		    (code != FLEXCAN_MB_CODE_RX_OVERRUN))
-			return 0;
+			return NULL;
 
 		if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
 			/* This MB was overrun, we lost data */
@@ -816,11 +823,17 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 	} else {
 		reg_iflag1 = priv->read(&regs->iflag1);
 		if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
-			return 0;
+			return NULL;
 
 		reg_ctrl = priv->read(&mb->can_ctrl);
 	}
 
+	skb = alloc_can_skb(offload->dev, &cf);
+	if (!skb) {
+		skb = ERR_PTR(-ENOMEM);
+		goto mark_as_read;
+	}
+
 	/* increase timstamp to full 32 bit */
 	*timestamp = reg_ctrl << 16;
 
@@ -839,7 +852,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 			*(__be32 *)(cf->data + i) = data;
 	}
 
-	/* mark as read */
+ mark_as_read:
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
 		/* Clear IRQ */
 		if (n < 32)
@@ -856,7 +869,7 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 	 */
 	priv->read(&regs->timer);
 
-	return 1;
+	return skb;
 }
 
 
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -139,71 +139,35 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
 static struct sk_buff *
 can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
 {
-	struct sk_buff *skb = NULL, *skb_error = NULL;
+	struct sk_buff *skb;
 	struct can_rx_offload_cb *cb;
-	struct can_frame *cf;
-	int ret;
-
-	if (likely(skb_queue_len(&offload->skb_queue) <
-		   offload->skb_queue_len_max)) {
-		skb = alloc_can_skb(offload->dev, &cf);
-		if (unlikely(!skb))
-			skb_error = ERR_PTR(-ENOMEM);	/* skb alloc failed */
-	} else {
-		skb_error = ERR_PTR(-ENOBUFS);		/* skb_queue is full */
-	}
-
-	/* If queue is full or skb not available, drop by reading into
-	 * overflow buffer.
-	 */
-	if (unlikely(skb_error)) {
-		struct can_frame cf_overflow;
-		u32 timestamp;
-
-		ret = offload->mailbox_read(offload, &cf_overflow,
-					    &timestamp, n);
-
-		/* Mailbox was empty. */
-		if (unlikely(!ret))
-			return NULL;
-
-		/* Mailbox has been read and we're dropping it or
-		 * there was a problem reading the mailbox.
-		 *
-		 * Increment error counters in any case.
-		 */
-		offload->dev->stats.rx_dropped++;
-		offload->dev->stats.rx_fifo_errors++;
-
-		/* There was a problem reading the mailbox, propagate
-		 * error value.
-		 */
-		if (unlikely(ret < 0))
-			return ERR_PTR(ret);
-
-		return skb_error;
-	}
-
-	cb = can_rx_offload_get_cb(skb);
-	ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
+	bool drop = false;
+	u32 timestamp;
 
+	/* If queue is full drop frame */
+	if (unlikely(skb_queue_len(&offload->skb_queue) >
+		     offload->skb_queue_len_max))
+		drop = true;
+
+	skb = offload->mailbox_read(offload, n, &timestamp, drop);
 	/* Mailbox was empty. */
-	if (unlikely(!ret)) {
-		kfree_skb(skb);
+	if (unlikely(!skb))
 		return NULL;
-	}
-
-	/* There was a problem reading the mailbox, propagate error value. */
-	if (unlikely(ret < 0)) {
-		kfree_skb(skb);
 
+	/* There was a problem reading the mailbox, propagate
+	 * error value.
+	 */
+	if (unlikely(IS_ERR(skb))) {
 		offload->dev->stats.rx_dropped++;
 		offload->dev->stats.rx_fifo_errors++;
 
-		return ERR_PTR(ret);
+		return skb;
 	}
 
 	/* Mailbox was read. */
+	cb = can_rx_offload_get_cb(skb);
+	cb->timestamp = timestamp;
+
 	return skb;
 }
 
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -535,15 +535,28 @@ struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
 	return container_of(offload, struct ti_hecc_priv, offload);
 }
 
-static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
-					 struct can_frame *cf,
-					 u32 *timestamp, unsigned int mbxno)
+static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
+					    unsigned int mbxno, u32 *timestamp,
+					    bool drop)
 {
 	struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
+	struct sk_buff *skb;
+	struct can_frame *cf;
 	u32 data, mbx_mask;
-	int ret = 1;
 
 	mbx_mask = BIT(mbxno);
+
+	if (unlikely(drop)) {
+		skb = ERR_PTR(-ENOBUFS);
+		goto mark_as_read;
+	}
+
+	skb = alloc_can_skb(offload->dev, &cf);
+	if (unlikely(!skb)) {
+		skb = ERR_PTR(-ENOMEM);
+		goto mark_as_read;
+	}
+
 	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
 	if (data & HECC_CANMID_IDE)
 		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -578,11 +591,12 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
 	 */
 	if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
 		     hecc_read(priv, HECC_CANRML) & mbx_mask))
-		ret = -ENOBUFS;
+		skb = ERR_PTR(-ENOBUFS);
 
+ mark_as_read:
 	hecc_write(priv, HECC_CANRMP, mbx_mask);
 
-	return ret;
+	return skb;
 }
 
 static int ti_hecc_error(struct net_device *ndev, int int_status,
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -15,9 +15,9 @@
 struct can_rx_offload {
 	struct net_device *dev;
 
-	unsigned int (*mailbox_read)(struct can_rx_offload *offload,
-				     struct can_frame *cf,
-				     u32 *timestamp, unsigned int mb);
+	struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
+					unsigned int mb, u32 *timestamp,
+					bool drop);
 
 	struct sk_buff_head skb_queue;
 	u32 skb_queue_len_max;
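Taken together, the hunks above define a three-way return contract for the callback. The following comment is an editorial summary, not text from the patch:

/* mailbox_read() contract after this patch (editorial summary):
 *
 * - The callback allocates the skb itself, so it can later choose
 *   between alloc_can_skb() and alloc_canfd_skb() per frame.
 * - drop == true asks the callback to read the mailbox to free it,
 *   discard the frame and return ERR_PTR(-ENOBUFS).
 * - Return NULL if the mailbox was empty.
 * - Return an ERR_PTR() if the frame was read but had to be dropped;
 *   can_rx_offload_offload_one() then increments the rx_dropped and
 *   rx_fifo_errors counters.
 * - Return the skb on success; the caller stores the timestamp in
 *   the skb control buffer and queues the frame.
 */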