Merge branch 'master'
@@ -66,4 +66,5 @@ struct pxafb_mach_info {

};
void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info);
void set_pxa_fb_parent(struct device *parent_dev);
unsigned long pxafb_get_hsync_time(struct device *dev);
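Editor's aside, not part of the commit: the header above exposes set_pxa_fb_info() for board-support code to hand its panel description to the PXA framebuffer driver. A minimal, hypothetical sketch of typical usage follows; the board name and the field values are made up and depend entirely on the LCD in use.

/* Hypothetical board code; not from this diff. */
static struct pxafb_mach_info example_lcd_info = {
    /* .pixclock, .xres, .yres, .bpp and the sync timings would go here */
};

static void __init example_board_init_fb(void)
{
    set_pxa_fb_info(&example_lcd_info);   /* register the panel description with pxafb */
}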

@@ -9,7 +9,7 @@
 * 06-Dec-1997 RMK Created.
 * 02-Sep-2003 BJD Modified for S3C2410
 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA
 *
 * 13-Oct-2005 BJD Fixed problems with LDRH/STRH offset range
 */

#ifndef __ASM_ARM_ARCH_IO_H
@@ -97,7 +97,7 @@ DECLARE_IO(int,l,"")
    else \
        __asm__ __volatile__( \
        "strb %0, [%1, #0] @ outbc" \
        : : "r" (value), "r" ((port))); \
})

#define __inbc(port) \
@@ -110,35 +110,61 @@ DECLARE_IO(int,l,"")
    else \
        __asm__ __volatile__( \
        "ldrb %0, [%1, #0] @ inbc" \
        : "=r" (result) : "r" ((port))); \
    result; \
})

#define __outwc(value,port) \
({ \
    unsigned long v = value; \
    if (__PORT_PCIO((port))) \
        __asm__ __volatile__( \
        "strh %0, [%1, %2] @ outwc" \
        : : "r" (v), "r" (PCIO_BASE), "Jr" ((port))); \
    else \
    if (__PORT_PCIO((port))) { \
        if ((port) < 256 && (port) > -256) \
            __asm__ __volatile__( \
            "strh %0, [%1, %2] @ outwc" \
            : : "r" (v), "r" (PCIO_BASE), "Jr" ((port))); \
        else if ((port) > 0) \
            __asm__ __volatile__( \
            "strh %0, [%1, %2] @ outwc" \
            : : "r" (v), \
                "r" (PCIO_BASE + ((port) & ~0xff)), \
                "Jr" (((port) & 0xff))); \
        else \
            __asm__ __volatile__( \
            "strh %0, [%1, #0] @ outwc" \
            : : "r" (v), \
                "r" (PCIO_BASE + (port))); \
    } else \
        __asm__ __volatile__( \
        "strh %0, [%1, #0] @ outwc" \
        : : "r" (v), "r" ((port))); \
})

#define __inwc(port) \
({ \
    unsigned short result; \
    if (__PORT_PCIO((port))) \
        __asm__ __volatile__( \
        "ldrh %0, [%1, %2] @ inwc" \
        : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port))); \
    else \
    if (__PORT_PCIO((port))) { \
        if ((port) < 256 && (port) > -256) \
            __asm__ __volatile__( \
            "ldrh %0, [%1, %2] @ inwc" \
            : "=r" (result) \
            : "r" (PCIO_BASE), \
                "Jr" ((port))); \
        else if ((port) > 0) \
            __asm__ __volatile__( \
            "ldrh %0, [%1, %2] @ inwc" \
            : "=r" (result) \
            : "r" (PCIO_BASE + ((port) & ~0xff)), \
                "Jr" (((port) & 0xff))); \
        else \
            __asm__ __volatile__( \
            "ldrh %0, [%1, #0] @ inwc" \
            : "=r" (result) \
            : "r" (PCIO_BASE + ((port)))); \
    } else \
        __asm__ __volatile__( \
        "ldrh %0, [%1, #0] @ inwc" \
        : "=r" (result) : "r" ((port))); \
    result; \
})

#define __outlc(value,port) \

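Editor's note on the hunk above: ARM's halfword load/store instructions (LDRH/STRH) accept only an 8-bit immediate offset (-255..255), unlike the byte and word forms with their 12-bit range, so a large I/O port number cannot be encoded directly as the instruction offset. The rewritten __outwc/__inwc therefore keep small ports in the offset field and, for larger ones, fold the 256-byte-aligned part into the base register. A standalone sketch of that split; the function and parameter names are illustrative, not from the patch:

/* Illustrative only: mirrors the address split done by the new macros. */
static inline void split_io_port(unsigned long pcio_base, unsigned int port,
                                 unsigned long *base, unsigned int *offset)
{
    *base = pcio_base + (port & ~0xffu);   /* aligned part, rides in the base register */
    *offset = port & 0xff;                 /* small enough for the LDRH/STRH offset field */
}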
@@ -27,23 +27,27 @@
 * PCI bus.
 */

#define PBM_LOGCLUSTERS 3
#define PBM_NCLUSTERS (1 << PBM_LOGCLUSTERS)

struct pci_controller_info;

/* This contains the software state necessary to drive a PCI
 * controller's IOMMU.
 */
struct pci_iommu_arena {
    unsigned long *map;
    unsigned int hint;
    unsigned int limit;
};

struct pci_iommu {
    /* This protects the controller's IOMMU and all
     * streaming buffers underneath.
     */
    spinlock_t lock;

    struct pci_iommu_arena arena;

    /* IOMMU page table, a linear array of ioptes. */
    iopte_t *page_table; /* The page table itself. */
    int page_table_sz_bits; /* log2 of how many pages does it map? */

    /* Base PCI memory space address where IOMMU mappings
     * begin.
@@ -62,12 +66,6 @@ struct pci_iommu {
     */
    unsigned long write_complete_reg;

    /* The lowest used consistent mapping entry. Since
     * we allocate consistent maps out of cluster 0 this
     * is relative to the beginning of cluster 0.
     */
    u32 lowest_consistent_map;

    /* In order to deal with some buggy third-party PCI bridges that
     * do wrong prefetching, we never mark valid mappings as invalid.
     * Instead we point them at this dummy page.
@@ -75,16 +73,6 @@ struct pci_iommu {
    unsigned long dummy_page;
    unsigned long dummy_page_pa;

    /* If PBM_NCLUSTERS is ever decreased to 4 or lower,
     * or if largest supported page_table_sz * 8K goes above
     * 2GB, you must increase the size of the type of
     * these counters. You have been duly warned. -DaveM
     */
    struct {
        u16 next;
        u16 flush;
    } alloc_info[PBM_NCLUSTERS];

    /* CTX allocation. */
    unsigned long ctx_lowest_free;
    unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)];
@@ -102,7 +90,7 @@ struct pci_iommu {
    u32 dma_addr_mask;
};

extern void pci_iommu_table_init(struct pci_iommu *, int);
extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask);

/* This describes a PCI bus module's streaming buffer. */
struct pci_strbuf {

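Editor's aside on pci_iommu_arena above (hedged, not from this patch): the trio map/hint/limit is the usual shape of a bitmap arena, where map is the allocation bitmap, limit the number of entries and hint the index at which the next search starts. A minimal sketch of how such an arena is typically scanned, using the generic bitops; the helper name is made up and the real sparc64 allocator in the arch code differs in detail:

/* Hypothetical helper, for illustration only. */
static long arena_alloc_entry(struct pci_iommu_arena *arena)
{
    unsigned long n;

    n = find_next_zero_bit(arena->map, arena->limit, arena->hint);
    if (n >= arena->limit) {
        /* wrap around once and retry from the start */
        n = find_next_zero_bit(arena->map, arena->hint, 0);
        if (n >= arena->hint)
            return -1;   /* arena exhausted */
    }
    __set_bit(n, arena->map);
    arena->hint = n + 1;
    return n;
}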
@@ -162,13 +162,13 @@ typedef struct acct acct_t;
#ifdef __KERNEL__
/*
 * Yet another set of HZ to *HZ helper functions.
 * See <linux/times.h> for the original.
 * See <linux/jiffies.h> for the original.
 */

static inline u32 jiffies_to_AHZ(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0
    return x / (HZ / USER_HZ);
    return x / (HZ / AHZ);
#else
    u64 tmp = (u64)x * TICK_NSEC;
    do_div(tmp, (NSEC_PER_SEC / AHZ));

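Editor's worked example for jiffies_to_AHZ() above (hedged; assumes AHZ = 100): with HZ = 250 and TICK_NSEC of roughly 4,000,000 ns, HZ / AHZ truncates to 2, so the cheap x / (HZ / AHZ) path would over-count; the modulus guard (4,000,000 % 10,000,000 != 0) therefore selects the 64-bit path, which computes x * 4,000,000 / 10,000,000 = 2x/5 exactly. When a whole number of ticks fits an AHZ period, e.g. HZ = 100 with TICK_NSEC = 10,000,000, the guard passes and the plain integer division is used instead.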
@@ -24,7 +24,12 @@ struct kioctx;
#define KIOCB_SYNC_KEY (~0U)

/* ki_flags bits */
#define KIF_LOCKED 0
/*
 * This may be used for cancel/retry serialization in the future, but
 * for now it's unused and we probably don't want modules to even
 * think they can use it.
 */
/* #define KIF_LOCKED 0 */
#define KIF_KICKED 1
#define KIF_CANCELLED 2

@@ -393,15 +393,13 @@ extern cpumask_t cpu_present_map;
#define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map)

/* Find the highest possible smp_processor_id() */
static inline unsigned int highest_possible_processor_id(void)
{
    unsigned int cpu, highest = 0;

    for_each_cpu_mask(cpu, cpu_possible_map)
        highest = cpu;

    return highest;
}
#define highest_possible_processor_id() \
({ \
    unsigned int cpu, highest = 0; \
    for_each_cpu_mask(cpu, cpu_possible_map) \
        highest = cpu; \
    highest; \
})


#endif /* __LINUX_CPUMASK_H */
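Editor's aside (not part of the diff): both the removed inline and the new statement-expression macro return the highest CPU number that may ever come online, so callers use it to size per-CPU lookup structures. A small, hypothetical usage sketch:

/* Illustrative caller, hypothetical; error handling trimmed. */
static int *alloc_cpu_slots(void)
{
    return kmalloc((highest_possible_processor_id() + 1) * sizeof(int),
                   GFP_KERNEL);
}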

@@ -442,12 +442,14 @@ static inline void list_splice_init(struct list_head *list,
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
    for (pos = (head)->next; prefetch(pos->next), pos != (head); \
        pos = rcu_dereference(pos->next))
    for (pos = (head)->next; \
        prefetch(rcu_dereference(pos)->next), pos != (head); \
        pos = pos->next)

#define __list_for_each_rcu(pos, head) \
    for (pos = (head)->next; pos != (head); \
        pos = rcu_dereference(pos->next))
    for (pos = (head)->next; \
        rcu_dereference(pos) != (head); \
        pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
@@ -461,8 +463,9 @@ static inline void list_splice_init(struct list_head *list,
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
    for (pos = (head)->next, n = pos->next; pos != (head); \
        pos = rcu_dereference(n), n = pos->next)
    for (pos = (head)->next; \
        n = rcu_dereference(pos)->next, pos != (head); \
        pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
@@ -474,11 +477,11 @@ static inline void list_splice_init(struct list_head *list,
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
    for (pos = list_entry((head)->next, typeof(*pos), member); \
        prefetch(pos->member.next), &pos->member != (head); \
        pos = rcu_dereference(list_entry(pos->member.next, \
            typeof(*pos), member)))
#define list_for_each_entry_rcu(pos, head, member) \
    for (pos = list_entry((head)->next, typeof(*pos), member); \
        prefetch(rcu_dereference(pos)->member.next), \
        &pos->member != (head); \
        pos = list_entry(pos->member.next, typeof(*pos), member))


/**
@@ -492,8 +495,9 @@ static inline void list_splice_init(struct list_head *list,
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
    for ((pos) = (pos)->next; prefetch((pos)->next), (pos) != (head); \
        (pos) = rcu_dereference((pos)->next))
    for ((pos) = (pos)->next; \
        prefetch(rcu_dereference((pos))->next), (pos) != (head); \
        (pos) = (pos)->next)

/*
 * Double linked lists with a single pointer list head.
@@ -696,8 +700,9 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
    pos = n)

#define hlist_for_each_rcu(pos, head) \
    for ((pos) = (head)->first; pos && ({ prefetch((pos)->next); 1; }); \
        (pos) = rcu_dereference((pos)->next))
    for ((pos) = (head)->first; \
        rcu_dereference((pos)) && ({ prefetch((pos)->next); 1; }); \
        (pos) = (pos)->next)

/**
 * hlist_for_each_entry - iterate over list of given type
@@ -762,9 +767,9 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
    for (pos = (head)->first; \
        pos && ({ prefetch(pos->next); 1;}) && \
        rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \
        ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
        pos = rcu_dereference(pos->next))
        pos = pos->next)

#else
#warning "don't include kernel headers in userspace"
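Editor's aside on the reader side these macros serve (standard RCU usage, not part of this diff; struct item and item_list are made up): traversal must run inside an RCU read-side critical section, and it is the iterator, not the caller, that applies rcu_dereference() while walking the list:

/* Hypothetical reader. */
struct item {
    struct list_head link;
    int value;
};

static LIST_HEAD(item_list);

static int sum_items(void)
{
    struct item *p;
    int sum = 0;

    rcu_read_lock();
    list_for_each_entry_rcu(p, &item_list, link)
        sum += p->value;
    rcu_read_unlock();
    return sum;
}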

@@ -94,6 +94,7 @@ struct rcu_data {
    long batch; /* Batch # for current RCU batch */
    struct rcu_head *nxtlist;
    struct rcu_head **nxttail;
    long count; /* # of queued items */
    struct rcu_head *curlist;
    struct rcu_head **curtail;
    struct rcu_head *donelist;