linux/include/asm-powerpc/io.h
Hugh Dickins 292f86f005 [POWERPC] Make mmiowb's io_sync preempt safe
If mmiowb() is always used prior to releasing a spinlock, as the
documentation suggests, then it is safe against preemption; but I'm not
convinced that's always the case.  If preemption occurs between the sync
and the get_paca()->io_sync = 0 store, I believe there's no problem.  But
in the unlikely event that gcc performs the store relative to a register
other than r13 (as it did with current), there is a small danger of
setting another cpu's io_sync to 0 just after that cpu has set it to 1.
Rewrite the ppc64 mmiowb to prevent that.

The remaining io_sync assignments in io.h are all get_paca()->io_sync = 1,
which is harmless even if preempted to the wrong cpu (the context switch
itself syncs); and those in spinlock.h run while preemption is disabled.
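
For illustration, the old sequence was roughly this (reconstructed from
the pre-patch code, so treat it as a sketch rather than verbatim):

	sync
	reg = get_paca();	/* gcc may cache the paca pointer in a GPR
				   other than r13 */
	/* <-- preempted and migrated here: reg still points at the old cpu's paca */
	reg->io_sync = 0;	/* clears a flag that cpu may just have set to 1 */

The rewritten mmiowb() below instead clears the flag with a single
"stb %0,%1(13)" (%1 being offsetof(struct paca_struct, io_sync)), i.e. a
store addressed directly off r13, which always points at the current
cpu's paca.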

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2006-11-01 14:52:49 +11:00

#ifndef _ASM_POWERPC_IO_H
#define _ASM_POWERPC_IO_H
#ifdef __KERNEL__
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/* Check for existence of legacy devices */
extern int check_legacy_ioport(unsigned long base_port);
#define PNPBIOS_BASE 0xf000 /* only relevant for PReP */
#ifndef CONFIG_PPC64
#include <asm-ppc/io.h>
#else
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/byteorder.h>
#include <asm/paca.h>
#include <asm/synch.h>
#include <asm/delay.h>
#include <asm-generic/iomap.h>
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
#define SLOW_DOWN_IO
extern unsigned long isa_io_base;
extern unsigned long pci_io_base;
#ifdef CONFIG_PPC_ISERIES
extern int in_8(const volatile unsigned char __iomem *addr);
extern void out_8(volatile unsigned char __iomem *addr, int val);
extern int in_le16(const volatile unsigned short __iomem *addr);
extern int in_be16(const volatile unsigned short __iomem *addr);
extern void out_le16(volatile unsigned short __iomem *addr, int val);
extern void out_be16(volatile unsigned short __iomem *addr, int val);
extern unsigned in_le32(const volatile unsigned __iomem *addr);
extern unsigned in_be32(const volatile unsigned __iomem *addr);
extern void out_le32(volatile unsigned __iomem *addr, int val);
extern void out_be32(volatile unsigned __iomem *addr, int val);
extern unsigned long in_le64(const volatile unsigned long __iomem *addr);
extern unsigned long in_be64(const volatile unsigned long __iomem *addr);
extern void out_le64(volatile unsigned long __iomem *addr, unsigned long val);
extern void out_be64(volatile unsigned long __iomem *addr, unsigned long val);
extern unsigned char __raw_readb(const volatile void __iomem *addr);
extern unsigned short __raw_readw(const volatile void __iomem *addr);
extern unsigned int __raw_readl(const volatile void __iomem *addr);
extern unsigned long __raw_readq(const volatile void __iomem *addr);
extern void __raw_writeb(unsigned char v, volatile void __iomem *addr);
extern void __raw_writew(unsigned short v, volatile void __iomem *addr);
extern void __raw_writel(unsigned int v, volatile void __iomem *addr);
extern void __raw_writeq(unsigned long v, volatile void __iomem *addr);
extern void memset_io(volatile void __iomem *addr, int c, unsigned long n);
extern void memcpy_fromio(void *dest, const volatile void __iomem *src,
			  unsigned long n);
extern void memcpy_toio(volatile void __iomem *dest, const void *src,
			unsigned long n);
#else /* CONFIG_PPC_ISERIES */
#define in_8(addr) __in_8((addr))
#define out_8(addr, val) __out_8((addr), (val))
#define in_le16(addr) __in_le16((addr))
#define in_be16(addr) __in_be16((addr))
#define out_le16(addr, val) __out_le16((addr), (val))
#define out_be16(addr, val) __out_be16((addr), (val))
#define in_le32(addr) __in_le32((addr))
#define in_be32(addr) __in_be32((addr))
#define out_le32(addr, val) __out_le32((addr), (val))
#define out_be32(addr, val) __out_be32((addr), (val))
#define in_le64(addr) __in_le64((addr))
#define in_be64(addr) __in_be64((addr))
#define out_le64(addr, val) __out_le64((addr), (val))
#define out_be64(addr, val) __out_be64((addr), (val))
static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
	return *(volatile unsigned char __force *)addr;
}

static inline unsigned short __raw_readw(const volatile void __iomem *addr)
{
	return *(volatile unsigned short __force *)addr;
}

static inline unsigned int __raw_readl(const volatile void __iomem *addr)
{
	return *(volatile unsigned int __force *)addr;
}

static inline unsigned long __raw_readq(const volatile void __iomem *addr)
{
	return *(volatile unsigned long __force *)addr;
}

static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
{
	*(volatile unsigned char __force *)addr = v;
}

static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
{
	*(volatile unsigned short __force *)addr = v;
}

static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
{
	*(volatile unsigned int __force *)addr = v;
}

static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
{
	*(volatile unsigned long __force *)addr = v;
}
#define memset_io(a,b,c) eeh_memset_io((a),(b),(c))
#define memcpy_fromio(a,b,c) eeh_memcpy_fromio((a),(b),(c))
#define memcpy_toio(a,b,c) eeh_memcpy_toio((a),(b),(c))
#endif /* CONFIG_PPC_ISERIES */
/*
 * The insw/outsw/insl/outsl macros don't do byte-swapping.
 * They are only used in practice for transferring buffers which
 * are arrays of bytes, and byte-swapping is not appropriate in
 * that case.  - paulus
 */
#define insb(port, buf, ns) eeh_insb((port), (buf), (ns))
#define insw(port, buf, ns) eeh_insw_ns((port), (buf), (ns))
#define insl(port, buf, nl) eeh_insl_ns((port), (buf), (nl))
#define outsb(port, buf, ns) _outsb((u8 __iomem *)((port)+pci_io_base), (buf), (ns))
#define outsw(port, buf, ns) _outsw_ns((u16 __iomem *)((port)+pci_io_base), (buf), (ns))
#define outsl(port, buf, nl) _outsl_ns((u32 __iomem *)((port)+pci_io_base), (buf), (nl))
#define readb(addr) eeh_readb(addr)
#define readw(addr) eeh_readw(addr)
#define readl(addr) eeh_readl(addr)
#define readq(addr) eeh_readq(addr)
#define writeb(data, addr) eeh_writeb((data), (addr))
#define writew(data, addr) eeh_writew((data), (addr))
#define writel(data, addr) eeh_writel((data), (addr))
#define writeq(data, addr) eeh_writeq((data), (addr))
#define inb(port) eeh_inb((unsigned long)port)
#define outb(val, port) eeh_outb(val, (unsigned long)port)
#define inw(port) eeh_inw((unsigned long)port)
#define outw(val, port) eeh_outw(val, (unsigned long)port)
#define inl(port) eeh_inl((unsigned long)port)
#define outl(val, port) eeh_outl(val, (unsigned long)port)
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define readq_relaxed(addr) readq(addr)
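/*
 * Note: the eeh_* wrappers used in the macros above come from <asm/eeh.h>
 * (included near the end of this file).  They layer EEH error detection on
 * top of the underlying accessors (the out-of-line iSeries versions or the
 * inline __in_8/__out_8 family defined below).
 */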
extern void _insb(volatile u8 __iomem *port, void *buf, long count);
extern void _outsb(volatile u8 __iomem *port, const void *buf, long count);
extern void _insw_ns(volatile u16 __iomem *port, void *buf, long count);
extern void _outsw_ns(volatile u16 __iomem *port, const void *buf, long count);
extern void _insl_ns(volatile u32 __iomem *port, void *buf, long count);
extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
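/*
 * mmiowb() and the io_sync flag: the __out_N accessors below set
 * paca->io_sync to 1 after each MMIO store; mmiowb() and the spinlock
 * release path (see asm/spinlock.h) issue a sync and clear it again.
 * The flag is cleared here with a store addressed directly off r13,
 * which always points at the current cpu's paca, so being preempted
 * and migrated mid-sequence cannot clear another cpu's flag.
 */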
static inline void mmiowb(void)
{
	unsigned long tmp;

	__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
	: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
	: "memory");
}
/*
 * output pause versions need a delay at least for the
 * w83c105 ide controller in a p610.
 */
#define inb_p(port) inb(port)
#define outb_p(val, port) (udelay(1), outb((val), (port)))
#define inw_p(port) inw(port)
#define outw_p(val, port) (udelay(1), outw((val), (port)))
#define inl_p(port) inl(port)
#define outl_p(val, port) (udelay(1), outl((val), (port)))
#define IO_SPACE_LIMIT ~(0UL)
extern int __ioremap_explicit(unsigned long p_addr, unsigned long v_addr,
			      unsigned long size, unsigned long flags);
extern void __iomem *__ioremap(unsigned long address, unsigned long size,
			       unsigned long flags);
/**
 * ioremap - map bus memory into CPU space
 * @address: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
extern void __iomem *ioremap(unsigned long address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
extern int iounmap_explicit(volatile void __iomem *addr, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
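/*
 * Typical use, as a sketch only (pdev is a hypothetical struct pci_dev
 * pointer and STATUS_REG a made-up register offset):
 *
 *	void __iomem *regs = ioremap(pci_resource_start(pdev, 0), 0x1000);
 *	if (regs) {
 *		u32 status = readl(regs + STATUS_REG);
 *		writel(status | 1, regs + STATUS_REG);
 *		iounmap(regs);
 *	}
 */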
extern void __iomem * reserve_phb_iospace(unsigned long size);
/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return __pa((unsigned long)address);
}
/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)__va(address);
}
/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
/* We do NOT want virtual merging, it would put too much pressure on
 * our iommu allocator. Instead, we want drivers to be smart enough
 * to coalesce sglists that happen to have been mapped in a contiguous
 * way by the iommu.
 */
#define BIO_VMERGE_BOUNDARY 0
static inline void iosync(void)
{
	__asm__ __volatile__ ("sync" : : : "memory");
}
/* Enforce in-order execution of data I/O.
 * No distinction between read/write on PPC; use eieio for all three.
 */
#define iobarrier_rw() eieio()
#define iobarrier_r() eieio()
#define iobarrier_w() eieio()
/*
 * 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
 * These routines do not perform EEH-related I/O address translation,
 * and should not be used directly by device drivers. Use inb/readb
 * instead.
 */
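/*
 * A note on the pattern used below: the leading "sync" orders the MMIO
 * access after any preceding cacheable stores; on loads, the trailing
 * "twi 0,<dest>,0; isync" pair creates a data dependency plus a
 * context-synchronising trap barrier, so no later instruction executes
 * until the I/O load has actually returned its value.  Stores
 * additionally set paca->io_sync so that mmiowb() or the spinlock
 * release path knows a sync is still owed.
 */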
static inline int __in_8(const volatile unsigned char __iomem *addr)
{
	int ret;

	__asm__ __volatile__("sync; lbz%U1%X1 %0,%1; twi 0,%0,0; isync"
		: "=r" (ret) : "m" (*addr));
	return ret;
}

static inline void __out_8(volatile unsigned char __iomem *addr, int val)
{
	__asm__ __volatile__("sync; stb%U0%X0 %1,%0"
		: "=m" (*addr) : "r" (val));
	get_paca()->io_sync = 1;
}

static inline int __in_le16(const volatile unsigned short __iomem *addr)
{
	int ret;

	__asm__ __volatile__("sync; lhbrx %0,0,%1; twi 0,%0,0; isync"
		: "=r" (ret) : "r" (addr), "m" (*addr));
	return ret;
}

static inline int __in_be16(const volatile unsigned short __iomem *addr)
{
	int ret;

	__asm__ __volatile__("sync; lhz%U1%X1 %0,%1; twi 0,%0,0; isync"
		: "=r" (ret) : "m" (*addr));
	return ret;
}

static inline void __out_le16(volatile unsigned short __iomem *addr, int val)
{
	__asm__ __volatile__("sync; sthbrx %1,0,%2"
		: "=m" (*addr) : "r" (val), "r" (addr));
	get_paca()->io_sync = 1;
}

static inline void __out_be16(volatile unsigned short __iomem *addr, int val)
{
	__asm__ __volatile__("sync; sth%U0%X0 %1,%0"
		: "=m" (*addr) : "r" (val));
	get_paca()->io_sync = 1;
}

static inline unsigned __in_le32(const volatile unsigned __iomem *addr)
{
	unsigned ret;

	__asm__ __volatile__("sync; lwbrx %0,0,%1; twi 0,%0,0; isync"
		: "=r" (ret) : "r" (addr), "m" (*addr));
	return ret;
}

static inline unsigned __in_be32(const volatile unsigned __iomem *addr)
{
	unsigned ret;

	__asm__ __volatile__("sync; lwz%U1%X1 %0,%1; twi 0,%0,0; isync"
		: "=r" (ret) : "m" (*addr));
	return ret;
}

static inline void __out_le32(volatile unsigned __iomem *addr, int val)
{
	__asm__ __volatile__("sync; stwbrx %1,0,%2" : "=m" (*addr)
		: "r" (val), "r" (addr));
	get_paca()->io_sync = 1;
}

static inline void __out_be32(volatile unsigned __iomem *addr, int val)
{
	__asm__ __volatile__("sync; stw%U0%X0 %1,%0"
		: "=m" (*addr) : "r" (val));
	get_paca()->io_sync = 1;
}

static inline unsigned long __in_le64(const volatile unsigned long __iomem *addr)
{
	unsigned long tmp, ret;

	__asm__ __volatile__(
		"sync\n"
		"ld %1,0(%2)\n"
		"twi 0,%1,0\n"
		"isync\n"
		"rldimi %0,%1,5*8,1*8\n"
		"rldimi %0,%1,3*8,2*8\n"
		"rldimi %0,%1,1*8,3*8\n"
		"rldimi %0,%1,7*8,4*8\n"
		"rldicl %1,%1,32,0\n"
		"rlwimi %0,%1,8,8,31\n"
		"rlwimi %0,%1,24,16,23\n"
		: "=r" (ret) , "=r" (tmp) : "b" (addr) , "m" (*addr));
	return ret;
}

static inline unsigned long __in_be64(const volatile unsigned long __iomem *addr)
{
	unsigned long ret;

	__asm__ __volatile__("sync; ld%U1%X1 %0,%1; twi 0,%0,0; isync"
		: "=r" (ret) : "m" (*addr));
	return ret;
}

static inline void __out_le64(volatile unsigned long __iomem *addr, unsigned long val)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rldimi %0,%1,5*8,1*8\n"
		"rldimi %0,%1,3*8,2*8\n"
		"rldimi %0,%1,1*8,3*8\n"
		"rldimi %0,%1,7*8,4*8\n"
		"rldicl %1,%1,32,0\n"
		"rlwimi %0,%1,8,8,31\n"
		"rlwimi %0,%1,24,16,23\n"
		"sync\n"
		"std %0,0(%3)"
		: "=&r" (tmp) , "=&r" (val) : "1" (val) , "b" (addr) , "m" (*addr));
	get_paca()->io_sync = 1;
}

static inline void __out_be64(volatile unsigned long __iomem *addr, unsigned long val)
{
	__asm__ __volatile__("sync; std%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
	get_paca()->io_sync = 1;
}
#include <asm/eeh.h>
/* Nothing to do */
#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p) __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p) p
#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_IO_H */