Mirror of https://github.com/torvalds/linux.git (synced 2024-11-23 04:31:50 +00:00)
s390/pci: move io address mapping code to pci_insn.c
This is a preparation patch for usage of new pci instructions. No functional change.

Signed-off-by: Sebastian Ott <sebott@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
This commit is contained in:
parent fbfe07d440
commit 81deca12c2
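To make the interface change concrete before the diff: the patch keeps zpci_load()/zpci_store() as the driver-facing names but changes their arguments from a (req, offset) pair to an ioremapped address plus access length, while the old req/offset variants live on as __zpci_load()/__zpci_store() inside pci_insn.c. Below is a minimal caller sketch under the new convention; the helper name example_read_u32() and the fixed 4-byte length are made up for illustration and are not part of the patch.

/*
 * Hypothetical caller sketch (illustration only, not from the patch):
 * the zpci_iomap_entry lookup and ZPCI_CREATE_REQ() construction now
 * happen inside zpci_load(), so callers pass the __iomem address and
 * the access length directly.
 */
static int example_read_u32(const volatile void __iomem *addr, u32 *val)
{
	u64 data;
	int rc;

	rc = zpci_load(&data, addr, 4);	/* new signature: (data, addr, len) */
	if (rc)
		return rc;		/* -EIO when the condition code is > 0 */

	*val = (u32) data;
	return 0;
}

The old req/offset entry points remain available to arch code as __zpci_load()/__zpci_store()/__zpci_store_block(), which is what the config-space accessors and the ISM driver switch to in the hunks below.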
@@ -124,9 +124,11 @@ union zpci_sic_iib {
 
 u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status);
 int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
-int zpci_load(u64 *data, u64 req, u64 offset);
-int zpci_store(u64 data, u64 req, u64 offset);
-int zpci_store_block(const u64 *data, u64 req, u64 offset);
+int __zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len);
+int __zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len);
+int __zpci_store_block(const u64 *data, u64 req, u64 offset);
 int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib);
 
 static inline int zpci_set_irq_ctrl(u16 ctl, u8 isc)
@@ -37,12 +37,10 @@ extern struct zpci_iomap_entry *zpci_iomap_start;
 #define zpci_read(LENGTH, RETTYPE) \
 static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
 { \
-	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
-	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
 	u64 data; \
 	int rc; \
 \
-	rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
+	rc = zpci_load(&data, addr, LENGTH); \
 	if (rc) \
 		data = -1ULL; \
 	return (RETTYPE) data; \
@@ -52,11 +50,9 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
 static inline void zpci_write_##VALTYPE(VALTYPE val, \
 					const volatile void __iomem *addr) \
 { \
-	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
-	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
 	u64 data = (VALTYPE) val; \
 \
-	zpci_store(data, req, ZPCI_OFFSET(addr)); \
+	zpci_store(addr, data, LENGTH); \
 }
 
 zpci_read(8, u64)
@@ -68,36 +64,38 @@ zpci_write(4, u32)
 zpci_write(2, u16)
 zpci_write(1, u8)
 
-static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
+static inline int zpci_write_single(volatile void __iomem *dst, const void *src,
+				    unsigned long len)
 {
 	u64 val;
 
 	switch (len) {
 	case 1:
-		val = (u64) *((u8 *) data);
+		val = (u64) *((u8 *) src);
 		break;
 	case 2:
-		val = (u64) *((u16 *) data);
+		val = (u64) *((u16 *) src);
 		break;
 	case 4:
-		val = (u64) *((u32 *) data);
+		val = (u64) *((u32 *) src);
 		break;
 	case 8:
-		val = (u64) *((u64 *) data);
+		val = (u64) *((u64 *) src);
 		break;
 	default:
 		val = 0;	/* let FW report error */
 		break;
 	}
-	return zpci_store(val, req, offset);
+	return zpci_store(dst, val, len);
 }
 
-static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
+static inline int zpci_read_single(void *dst, const volatile void __iomem *src,
+				   unsigned long len)
 {
 	u64 data;
 	int cc;
 
-	cc = zpci_load(&data, req, offset);
+	cc = zpci_load(&data, src, len);
 	if (cc)
 		goto out;
 
@@ -119,10 +117,8 @@ out:
 	return cc;
 }
 
-static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
-{
-	return zpci_store_block(data, req, offset);
-}
+int zpci_write_block(volatile void __iomem *dst, const void *src,
+		     unsigned long len);
 
 static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
 {
@@ -140,18 +136,15 @@ static inline int zpci_memcpy_fromio(void *dst,
 				     const volatile void __iomem *src,
 				     unsigned long n)
 {
-	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
-	u64 req, offset = ZPCI_OFFSET(src);
 	int size, rc = 0;
 
 	while (n > 0) {
 		size = zpci_get_max_write_size((u64 __force) src,
 					       (u64) dst, n, 8);
-		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
-		rc = zpci_read_single(req, dst, offset, size);
+		rc = zpci_read_single(dst, src, size);
 		if (rc)
 			break;
-		offset += size;
 		src += size;
 		dst += size;
 		n -= size;
 	}
@@ -161,8 +154,6 @@ static inline int zpci_memcpy_fromio(void *dst,
 static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 				   const void *src, unsigned long n)
 {
-	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
-	u64 req, offset = ZPCI_OFFSET(dst);
 	int size, rc = 0;
 
 	if (!src)
@@ -171,16 +162,14 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 	while (n > 0) {
 		size = zpci_get_max_write_size((u64 __force) dst,
 					       (u64) src, n, 128);
-		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
-
 		if (size > 8) /* main path */
-			rc = zpci_write_block(req, src, offset);
+			rc = zpci_write_block(dst, src, size);
 		else
-			rc = zpci_write_single(req, src, offset, size);
+			rc = zpci_write_single(dst, src, size);
 		if (rc)
 			break;
-		offset += size;
 		src += size;
 		dst += size;
 		n -= size;
 	}
 	return rc;
@@ -188,7 +188,7 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
 	u64 data;
 	int rc;
 
-	rc = zpci_load(&data, req, offset);
+	rc = __zpci_load(&data, req, offset);
 	if (!rc) {
 		data = le64_to_cpu((__force __le64) data);
 		data >>= (8 - len) * 8;
@@ -206,7 +206,7 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
 
 	data <<= (8 - len) * 8;
 	data = (__force u64) cpu_to_le64(data);
-	rc = zpci_store(data, req, offset);
+	rc = __zpci_store(data, req, offset);
 	return rc;
 }
 
@@ -11,6 +11,7 @@
 #include <asm/facility.h>
 #include <asm/pci_insn.h>
 #include <asm/pci_debug.h>
+#include <asm/pci_io.h>
 #include <asm/processor.h>
 
 #define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */
@@ -142,7 +143,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
 	return cc;
 }
 
-int zpci_load(u64 *data, u64 req, u64 offset)
+int __zpci_load(u64 *data, u64 req, u64 offset)
 {
 	u8 status;
 	int cc;
@@ -158,6 +159,15 @@ int zpci_load(u64 *data, u64 req, u64 offset)
 
 	return (cc > 0) ? -EIO : cc;
 }
+EXPORT_SYMBOL_GPL(__zpci_load);
+
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
+{
+	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+	return __zpci_load(data, req, ZPCI_OFFSET(addr));
+}
 EXPORT_SYMBOL_GPL(zpci_load);
 
 /* PCI Store */
@@ -180,7 +190,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
 	return cc;
 }
 
-int zpci_store(u64 data, u64 req, u64 offset)
+int __zpci_store(u64 data, u64 req, u64 offset)
 {
 	u8 status;
 	int cc;
@@ -196,6 +206,15 @@ int zpci_store(u64 data, u64 req, u64 offset)
 
 	return (cc > 0) ? -EIO : cc;
 }
+EXPORT_SYMBOL_GPL(__zpci_store);
+
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
+{
+	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+	return __zpci_store(data, req, ZPCI_OFFSET(addr));
+}
 EXPORT_SYMBOL_GPL(zpci_store);
 
 /* PCI Store Block */
@@ -216,7 +235,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
 	return cc;
 }
 
-int zpci_store_block(const u64 *data, u64 req, u64 offset)
+int __zpci_store_block(const u64 *data, u64 req, u64 offset)
 {
 	u8 status;
 	int cc;
@@ -232,4 +251,15 @@ int zpci_store_block(const u64 *data, u64 req, u64 offset)
 
 	return (cc > 0) ? -EIO : cc;
 }
-EXPORT_SYMBOL_GPL(zpci_store_block);
+EXPORT_SYMBOL_GPL(__zpci_store_block);
+
+int zpci_write_block(volatile void __iomem *dst,
+		     const void *src, unsigned long len)
+{
+	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
+	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+	u64 offset = ZPCI_OFFSET(dst);
+
+	return __zpci_store_block(src, req, offset);
+}
+EXPORT_SYMBOL_GPL(zpci_write_block);
@@ -215,7 +215,7 @@ static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data,
 	struct zpci_dev *zdev = to_zpci(ism->pdev);
 	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size);
 
-	return zpci_write_block(req, data, dmb_req);
+	return __zpci_store_block(data, req, dmb_req);
 }
 
 #endif /* S390_ISM_H */