/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define MV_XOR_POOL_SIZE		(MV_XOR_SLOT_SIZE * 3072)
#define MV_XOR_SLOT_SIZE		64
#define MV_XOR_THRESHOLD		1
#define MV_XOR_MAX_CHANNELS		2

#define MV_XOR_MIN_BYTE_COUNT		SZ_128
#define MV_XOR_MAX_BYTE_COUNT		(SZ_16M - 1)

/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR		0
#define XOR_OPERATION_MODE_MEMCPY	2
#define XOR_OPERATION_MODE_IN_DESC	7
#define XOR_DESCRIPTOR_SWAP		BIT(14)
#define XOR_DESC_SUCCESS		0x40000000

#define XOR_DESC_OPERATION_XOR		(0 << 24)
#define XOR_DESC_OPERATION_CRC32C	(1 << 24)
#define XOR_DESC_OPERATION_MEMCPY	(2 << 24)

#define XOR_DESC_DMA_OWNED		BIT(31)
#define XOR_DESC_EOD_INT_EN		BIT(31)

#define XOR_CURR_DESC(chan)	(chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan)	(chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan)	(chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan)	(chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan)	(chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan)	(chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan)	(chan->mmr_high_base + 0xE4)

#define XOR_CONFIG(chan)	(chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan)	(chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan)	(chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)
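
/*
 * Example (illustrative sketch only, not a helper exported by this header):
 * selecting the per-descriptor operation mode through the channel
 * configuration register defined above.  "chan" is assumed to be a valid
 * struct mv_xor_chan pointer, and the 0x7 operation-mode field mask is an
 * assumption made purely for the example:
 *
 *	u32 config = readl(XOR_CONFIG(chan));
 *
 *	config &= ~0x7;
 *	config |= XOR_OPERATION_MODE_IN_DESC;
 *	writel(config, XOR_CONFIG(chan));
 */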

#define XOR_INT_END_OF_DESC	BIT(0)
#define XOR_INT_END_OF_CHAIN	BIT(1)
#define XOR_INT_STOPPED		BIT(2)
#define XOR_INT_PAUSED		BIT(3)
#define XOR_INT_ERR_DECODE	BIT(4)
#define XOR_INT_ERR_RDPROT	BIT(5)
#define XOR_INT_ERR_WRPROT	BIT(6)
#define XOR_INT_ERR_OWN		BIT(7)
#define XOR_INT_ERR_PAR		BIT(8)
#define XOR_INT_ERR_MBUS	BIT(9)

#define XOR_INTR_ERRORS		(XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
				 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN    | \
				 XOR_INT_ERR_PAR    | XOR_INT_ERR_MBUS)

#define XOR_INTR_MASK_VALUE	(XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
				 XOR_INT_STOPPED     | XOR_INTR_ERRORS)
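
/*
 * Example (illustrative sketch only): unmasking the completion and error
 * interrupts defined above and inspecting the cause register.  "chan" is
 * assumed to be a valid struct mv_xor_chan pointer; whether the per-channel
 * bits additionally need a shift derived from chan->idx is a hardware
 * detail deliberately left out of this sketch:
 *
 *	u32 val;
 *
 *	val = readl(XOR_INTR_MASK(chan));
 *	val |= XOR_INTR_MASK_VALUE;
 *	writel(val, XOR_INTR_MASK(chan));
 *
 *	val = readl(XOR_INTR_CAUSE(chan));
 *	if (val & XOR_INTR_ERRORS)
 *		pr_err("mv_xor: error cause 0x%08x\n", val);
 */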

#define WINDOW_BASE(w)		(0x50 + ((w) << 2))
#define WINDOW_SIZE(w)		(0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w)	(0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan)	(0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan)	(0xA0 + ((chan) << 2))

#define WINDOW_COUNT		8

struct mv_xor_device {
	void __iomem		*xor_base;
	void __iomem		*xor_high_base;
	struct clk		*clk;
	struct mv_xor_chan	*channels[MV_XOR_MAX_CHANNELS];
	int			xor_type;

	u32			win_start[WINDOW_COUNT];
	u32			win_end[WINDOW_COUNT];
};

/**
 * struct mv_xor_chan - internal representation of an XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @idx: the index of the xor channel
 * @chain: device chain view of the descriptors
 * @free_slots: free slots usable by the channel
 * @allocated_slots: slots allocated by the driver
 * @completed_slots: slots completed by hardware but not yet acknowledged
 * @dmadev: dmaengine device registered for this channel
 * @dmachan: common dmaengine channel object
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 * @op_in_desc: when set, the operation type is written into each descriptor
 *              rather than into the channel configuration register
 */
struct mv_xor_chan {
	int			pending;
	spinlock_t		lock; /* protects the descriptor slot pool */
	void __iomem		*mmr_base;
	void __iomem		*mmr_high_base;
	unsigned int		idx;
	int			irq;
	struct list_head	chain;
	struct list_head	free_slots;
	struct list_head	allocated_slots;
	struct list_head	completed_slots;
	dma_addr_t		dma_desc_pool;
	void			*dma_desc_pool_virt;
	size_t			pool_size;
	struct dma_device	dmadev;
	struct dma_chan		dmachan;
	int			slots_allocated;
	struct tasklet_struct	irq_tasklet;
	int			op_in_desc;
	char			dummy_src[MV_XOR_MIN_BYTE_COUNT];
	char			dummy_dst[MV_XOR_MIN_BYTE_COUNT];
	dma_addr_t		dummy_src_addr, dummy_dst_addr;
	u32			saved_config_reg, saved_int_mask_reg;

	struct mv_xor_device	*xordev;
};

/**
 * struct mv_xor_desc_slot - software descriptor
 * @node: node on the mv_xor_chan lists
 * @sg_tx_list: list of slots that make up a multi-descriptor transaction
 * @type: type of the dma transaction described by this slot
 * @hw_desc: virtual address of the hardware descriptor chain
 * @idx: pool index
 * @async_tx: support for the async_tx api
 */
struct mv_xor_desc_slot {
	struct list_head		node;
	struct list_head		sg_tx_list;
	enum dma_transaction_type	type;
	void				*hw_desc;
	u16				idx;
	struct dma_async_tx_descriptor	async_tx;
};

/*
 * This structure describes the hardware XOR descriptor, which is 64 bytes
 * in size.  The mv_phy_src_idx() macro must be used when indexing the
 * values of the phy_src_addr[] array.  This is due to the fact that the
 * 'descriptor swap' feature, used on big endian systems, swaps descriptor
 * data within 8-byte blocks.  So two consecutive values of the
 * phy_src_addr[] array are actually swapped in big-endian, which explains
 * the different mv_phy_src_idx() implementation.
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
	u32 status;		/* descriptor execution status */
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_dest_addr;	/* destination block address */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
	u32 crc32_result;	/* result of CRC-32 calculation */
	u32 status;		/* descriptor execution status */
	u32 phy_next_desc;	/* next descriptor address pointer */
	u32 desc_command;	/* type of operation to be carried out */
	u32 phy_dest_addr;	/* destination block address */
	u32 byte_count;		/* size of src/dst blocks in bytes */
	u32 phy_src_addr[8];	/* source block addresses */
	u32 reserved1;
	u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif
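
/*
 * Example (illustrative sketch only, not a helper provided by this header):
 * filling the source pointers of a hardware descriptor.  "slot", "src" and
 * "src_cnt" are assumed local variables of the caller.  The point is that
 * phy_src_addr[] must always be indexed through mv_phy_src_idx() so the
 * same code works with the big-endian descriptor-swap layout:
 *
 *	struct mv_xor_desc *hw_desc = slot->hw_desc;
 *	int i;
 *
 *	for (i = 0; i < src_cnt; i++)
 *		hw_desc->phy_src_addr[mv_phy_src_idx(i)] = src[i];
 */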

#define to_mv_sw_desc(addr_hw_desc)		\
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx)	\
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

#endif