mirror of https://github.com/torvalds/linux.git (synced 2024-11-05 11:32:04 +00:00)
43452fadd6
Fix a potential risk when the NET_DMA and ASYNC_TX configs are enabled. The current descriptor release process lacks async_tx support: every descriptor is released whether or not async_tx has acked it, so there is a potential race condition when the DMA engine is used by other clients (e.g. when NET_DMA is enabled to offload TCP).

In our case, a race condition is raised when both talitos and the dmaengine are used to offload xor, because the NAPI scheduler syncs all pending requests in the DMA channels, and this disturbs the raid operations since the async_tx ack is not checked in fsl-dma. A just-submitted but not-yet-acked descriptor is freed; as a dependent tx, this freed descriptor triggers BUG_ON(async_tx_test_ack(depend_tx)) in async_tx_submit():

TASK = ee1a94a0[1390] 'md0_raid5' THREAD: ecf40000 CPU: 0
GPR00: 00000001 ecf41ca0 ee1a94a0 0000003f 00000001 c00593e4 00000000 00000001
GPR08: 00000000 a7a7a7a7 00000001 00000002 42028042 100a38d4 ed576d98 00000000
GPR16: ed5a11b0 00000000 2b162000 00000200 00000000 2d555000 ed3015e8 c15a7aa0
GPR24: 00000000 c155fc40 00000000 ecb63220 ecf41d28 ef640bb0 ef640c30 ecf41ca0
NIP [c02b048c] async_tx_submit+0x6c/0x2b4
LR [c02b068c] async_tx_submit+0x26c/0x2b4
Call Trace:
[ecf41ca0] [c02b068c] async_tx_submit+0x26c/0x2b4 (unreliable)
[ecf41cd0] [c02b0a4c] async_memcpy+0x240/0x25c
[ecf41d20] [c0421064] async_copy_data+0xa0/0x17c
[ecf41d70] [c0421cf4] __raid_run_ops+0x874/0xe10
[ecf41df0] [c0426ee4] handle_stripe+0x820/0x25e8
[ecf41e90] [c0429080] raid5d+0x3d4/0x5b4
[ecf41f40] [c04329b8] md_thread+0x138/0x16c
[ecf41f90] [c008277c] kthread+0x8c/0x90
[ecf41ff0] [c0011630] kernel_thread+0x4c/0x68

The other change in this patch is how completed descriptors are identified; there is a potential risk caused by an exception interrupt. Today, all descriptors in the ld_running list are treated as completed whenever an interrupt is raised. That works under normal conditions, but when an exception occurs it does not behave as expected. Hardware state should not be inferred from a software list; the right way is to read the current descriptor address register to find the last completed descriptor. If an interrupt is raised by an error, the descriptors in ld_running must not be treated as finished, or those unfinished descriptors will be released wrongly.

A simple way to reproduce: enable dmatest, then insert some bad descriptors which trigger Programming Error interrupts ahead of the good descriptors. The good descriptors are then freed before they are processed, because of the exception interrupt. Note: the bad descriptors only simulate an exception interrupt; the case illustrates the potential risk in the current fsl-dma very well.

Signed-off-by: Hongbo Zhang <hongbo.zhang@freescale.com>
Signed-off-by: Qiang Liu <qiang.liu@freescale.com>
Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
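
For illustration only (not part of the commit): a minimal sketch of the cleanup policy described above, written against the fsldma structures defined in the header below. The helper name is hypothetical and error handling is omitted; the point is that completion is derived from the CDAR register rather than from the ld_running list, and that nothing is freed until async_tx has acked it.

static void fsldma_cleanup_sketch(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	/* Ask the hardware, not the software list, how far it has gotten */
	u64 curr = DMA_IN(chan, &chan->regs->cdar, 64) & FSL_DMA_NLDA_MASK;

	/* Only descriptors the hardware has moved past are complete */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		if (desc->async_tx.phys == (dma_addr_t)curr)
			break;		/* hardware still owns this one */
		list_move_tail(&desc->node, &chan->ld_completed);
	}

	/* Free a completed descriptor only once async_tx has acked it */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
		if (!async_tx_test_ack(&desc->async_tx))
			continue;	/* still owned by the async_tx API */
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}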
235 lines
7.0 KiB
C
/*
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#ifndef __DMA_FSLDMA_H
#define __DMA_FSLDMA_H

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>

/* Define data structures needed by the Freescale
 * MPC8540 and MPC8349 DMA controllers.
 */
#define FSL_DMA_MR_CS		0x00000001
#define FSL_DMA_MR_CC		0x00000002
#define FSL_DMA_MR_CA		0x00000008
#define FSL_DMA_MR_EIE		0x00000040
#define FSL_DMA_MR_XFE		0x00000020
#define FSL_DMA_MR_EOLNIE	0x00000100
#define FSL_DMA_MR_EOLSIE	0x00000080
#define FSL_DMA_MR_EOSIE	0x00000200
#define FSL_DMA_MR_CDSM		0x00000010
#define FSL_DMA_MR_CTM		0x00000004
#define FSL_DMA_MR_EMP_EN	0x00200000
#define FSL_DMA_MR_EMS_EN	0x00040000
#define FSL_DMA_MR_DAHE		0x00002000
#define FSL_DMA_MR_SAHE		0x00001000

/*
 * Bandwidth/pause control determines how many bytes a given
 * channel is allowed to transfer before the DMA engine pauses
 * the current channel and switches to the next channel
 * (see the sketch after the DMA_IN/DMA_OUT macros below).
 */
#define FSL_DMA_MR_BWC		0x0A000000

/* Special MR definition for MPC8349 */
#define FSL_DMA_MR_EOTIE	0x00000080
#define FSL_DMA_MR_PRC_RM	0x00000800

#define FSL_DMA_SR_CH		0x00000020
#define FSL_DMA_SR_PE		0x00000010
#define FSL_DMA_SR_CB		0x00000004
#define FSL_DMA_SR_TE		0x00000080
#define FSL_DMA_SR_EOSI		0x00000002
#define FSL_DMA_SR_EOLSI	0x00000001
#define FSL_DMA_SR_EOCDI	0x00000001
#define FSL_DMA_SR_EOLNI	0x00000008

#define FSL_DMA_SATR_SBPATMU			0x20000000
#define FSL_DMA_SATR_STRANSINT_RIO		0x00c00000
#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ	0x00050000
#define FSL_DMA_SATR_SREADTYPE_BP_IORH		0x00020000
#define FSL_DMA_SATR_SREADTYPE_BP_NREAD		0x00040000
#define FSL_DMA_SATR_SREADTYPE_BP_MREAD		0x00070000

#define FSL_DMA_DATR_DBPATMU			0x20000000
#define FSL_DMA_DATR_DTRANSINT_RIO		0x00c00000
#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE	0x00050000
#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH	0x00010000

#define FSL_DMA_EOL		((u64)0x1)
#define FSL_DMA_SNEN		((u64)0x10)
#define FSL_DMA_EOSIE		0x8
#define FSL_DMA_NLDA_MASK	(~(u64)0x1f)

#define FSL_DMA_BCR_MAX_CNT	0x03ffffffu

#define FSL_DMA_DGSR_TE		0x80
#define FSL_DMA_DGSR_CH		0x20
#define FSL_DMA_DGSR_PE		0x10
#define FSL_DMA_DGSR_EOLNI	0x08
#define FSL_DMA_DGSR_CB		0x04
#define FSL_DMA_DGSR_EOSI	0x02
#define FSL_DMA_DGSR_EOLSI	0x01

typedef u64 __bitwise v64;
typedef u32 __bitwise v32;

struct fsl_dma_ld_hw {
	v64 src_addr;
	v64 dst_addr;
	v64 next_ln_addr;
	v32 count;
	v32 reserve;
} __attribute__((aligned(32)));

struct fsl_desc_sw {
	struct fsl_dma_ld_hw hw;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
} __attribute__((aligned(32)));

struct fsldma_chan_regs {
	u32 mr;		/* 0x00 - Mode Register */
	u32 sr;		/* 0x04 - Status Register */
	u64 cdar;	/* 0x08 - Current Descriptor Address Register */
	u64 sar;	/* 0x10 - Source Address Register */
	u64 dar;	/* 0x18 - Destination Address Register */
	u32 bcr;	/* 0x20 - Byte Count Register */
	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
};

struct fsldma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 8

struct fsldma_device {
	void __iomem *regs;	/* DGSR register base */
	struct device *dev;
	struct dma_device common;
	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
	u32 feature;		/* The same as DMA channels */
	int irq;		/* Channel IRQ */
};

/* Define macros for fsldma_chan->feature property */
#define FSL_DMA_LITTLE_ENDIAN	0x00000000
#define FSL_DMA_BIG_ENDIAN	0x00000001

#define FSL_DMA_IP_MASK		0x00000ff0
#define FSL_DMA_IP_85XX		0x00000010
#define FSL_DMA_IP_83XX		0x00000020

#define FSL_DMA_CHAN_PAUSE_EXT	0x00001000
#define FSL_DMA_CHAN_START_EXT	0x00002000

#ifdef CONFIG_PM
struct fsldma_chan_regs_save {
	u32 mr;
};

enum fsldma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
#endif

struct fsldma_chan {
	char name[8];			/* Channel name */
	struct fsldma_chan_regs __iomem *regs;
	spinlock_t desc_lock;		/* Descriptor operation lock */
	/*
	 * Descriptors which are queued to run, but have not yet been
	 * submitted to the hardware for execution
	 */
	struct list_head ld_pending;
	/*
	 * Descriptors which are currently being executed by the hardware
	 */
	struct list_head ld_running;
	/*
	 * Descriptors which have finished execution by the hardware. These
	 * descriptors have already had their cleanup actions run. They are
	 * waiting for the ACK bit to be set by the async_tx API.
	 */
	struct list_head ld_completed;	/* Link descriptors queue */
	struct dma_chan common;		/* DMA common channel */
	struct dma_pool *desc_pool;	/* Descriptors pool */
	struct device *dev;		/* Channel device */
	int irq;			/* Channel IRQ */
	int id;				/* Raw id of this channel */
	struct tasklet_struct tasklet;
	u32 feature;
	bool idle;			/* DMA controller is idle */
#ifdef CONFIG_PM
	struct fsldma_chan_regs_save regs_save;
	enum fsldma_pm_state pm_state;
#endif

	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
};

#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
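
/*
 * Illustration only (hypothetical sketch, not part of this header): how a
 * ->tx_submit hook could queue a prepared descriptor chain onto ld_pending
 * under desc_lock, matching the lifecycle documented in struct fsldma_chan
 * above. dma_cookie_assign() is the helper from drivers/dma/dmaengine.h.
 */
static dma_cookie_t fsl_dma_tx_submit_sketch(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);
	cookie = dma_cookie_assign(tx);
	/* The whole chain waits on ld_pending until the hardware is started */
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}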

#ifndef __powerpc64__
static u64 in_be64(const u64 __iomem *addr)
{
	return ((u64)in_be32((u32 __iomem *)addr) << 32) |
		(in_be32((u32 __iomem *)addr + 1));
}

static void out_be64(u64 __iomem *addr, u64 val)
{
	out_be32((u32 __iomem *)addr, val >> 32);
	out_be32((u32 __iomem *)addr + 1, (u32)val);
}

/* There are no asm instructions for 64-bit reverse loads and stores */
static u64 in_le64(const u64 __iomem *addr)
{
	return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
		(in_le32((u32 __iomem *)addr));
}

static void out_le64(u64 __iomem *addr, u64 val)
{
	out_le32((u32 __iomem *)addr + 1, val >> 32);
	out_le32((u32 __iomem *)addr, (u32)val);
}
#endif

#define DMA_IN(fsl_chan, addr, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			in_be##width(addr) : in_le##width(addr))
#define DMA_OUT(fsl_chan, addr, val, width)				\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			out_be##width(addr, val) : out_le##width(addr, val))
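
/*
 * Illustration only (hypothetical helper, not part of this header): the
 * bandwidth/pause sketch referenced above, a read-modify-write of the mode
 * register through DMA_IN/DMA_OUT that installs the default FSL_DMA_MR_BWC
 * value. That the BWC field occupies bits 27:24 is an assumption inferred
 * from the 0x0A000000 default.
 */
static void fsl_chan_set_default_bwc(struct fsldma_chan *chan)
{
	u32 mr = DMA_IN(chan, &chan->regs->mr, 32);

	mr &= ~0x0f000000;	/* clear the assumed BWC field, bits 27:24 */
	mr |= FSL_DMA_MR_BWC;	/* bytes to move before switching channels */
	DMA_OUT(chan, &chan->regs->mr, mr, 32);
}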

#define DMA_TO_CPU(fsl_chan, d, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			be##width##_to_cpu((__force __be##width)(v##width)d) : \
			le##width##_to_cpu((__force __le##width)(v##width)d))
#define CPU_TO_DMA(fsl_chan, c, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			(__force v##width)cpu_to_be##width(c) :		\
			(__force v##width)cpu_to_le##width(c))
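
/*
 * Illustration only (hypothetical helpers, not this driver's API): filling
 * hardware link descriptor fields through the conversion macros above, and
 * terminating a chain with FSL_DMA_EOL. On 85xx parts the snoop attributes
 * ride in the upper half of the 64-bit source address.
 */
static void set_desc_src_sketch(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
			? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;

	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_eol_sketch(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw)
{
	u64 next = DMA_TO_CPU(chan, hw->next_ln_addr, 64);

	/* The low bits of next_ln_addr carry flags; EOL ends the chain */
	hw->next_ln_addr = CPU_TO_DMA(chan, next | FSL_DMA_EOL, 64);
}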

#endif /* __DMA_FSLDMA_H */