mirror of
https://github.com/torvalds/linux.git
synced 2024-12-04 01:51:34 +00:00
block: add helpers to run flush_dcache_page() against a bio and a request's pages
The mtdblock driver doesn't call flush_dcache_page for pages in a request. So, this causes problems on architectures where the icache doesn't fill from the dcache, or with dcache aliases. The patch fixes this. The ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE symbol was introduced to avoid pointless empty cache-thrashing loops on architectures for which flush_dcache_page() is a no-op. Every architecture was provided with this symbol; the new helpers flush pages on architectures where ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE equals 1, and do nothing otherwise. See the "fix mtd_blkdevs problem with caches on some architectures" discussion on LKML for more information. Signed-off-by: Ilya Loginov <isloginov@gmail.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: David Woodhouse <dwmw2@infradead.org> Cc: Peter Horton <phorton@bitbox.co.uk> Cc: "Ed L. Cashin" <ecashin@coraid.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
This commit is contained in:
parent
3586e917f2
commit
2d4dc890b5
@ -9,6 +9,7 @@
|
|||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -408,6 +408,7 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
|
|||||||
* about to change to user space. This is the same method as used on SPARC64.
|
* about to change to user space. This is the same method as used on SPARC64.
|
||||||
* See update_mmu_cache for the user space part.
|
* See update_mmu_cache for the user space part.
|
||||||
*/
|
*/
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
extern void flush_dcache_page(struct page *);
|
extern void flush_dcache_page(struct page *);
|
||||||
|
|
||||||
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
|
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
|
||||||
|
@ -107,6 +107,7 @@ extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
|
|||||||
* do something here, but only for certain configurations. No such
|
* do something here, but only for certain configurations. No such
|
||||||
* configurations exist at this time.
|
* configurations exist at this time.
|
||||||
*/
|
*/
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(page) do { } while (0)
|
#define flush_dcache_mmap_lock(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(page) do { } while (0)
|
#define flush_dcache_mmap_unlock(page) do { } while (0)
|
||||||
|
@ -68,9 +68,11 @@ do { memcpy(dst, src, len); \
|
|||||||
#endif
|
#endif
|
||||||
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
|
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
|
||||||
# define flush_dcache_range(start,end) blackfin_dcache_flush_range((start), (end))
|
# define flush_dcache_range(start,end) blackfin_dcache_flush_range((start), (end))
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
|
# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
|
||||||
#else
|
#else
|
||||||
# define flush_dcache_range(start,end) do { } while (0)
|
# define flush_dcache_range(start,end) do { } while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
# define flush_dcache_page(page) do { } while (0)
|
# define flush_dcache_page(page) do { } while (0)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -12,6 +12,7 @@
|
|||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -47,6 +47,7 @@ static inline void __flush_cache_all(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* dcache/icache coherency... */
|
/* dcache/icache coherency... */
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
#ifdef CONFIG_MMU
|
#ifdef CONFIG_MMU
|
||||||
extern void flush_dcache_page(struct page *page);
|
extern void flush_dcache_page(struct page *page);
|
||||||
#else
|
#else
|
||||||
|
@ -15,6 +15,7 @@
|
|||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||||
#define flush_cache_range(vma,a,b)
|
#define flush_cache_range(vma,a,b)
|
||||||
#define flush_cache_page(vma,p,pfn)
|
#define flush_cache_page(vma,p,pfn)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page)
|
#define flush_dcache_page(page)
|
||||||
#define flush_dcache_mmap_lock(mapping)
|
#define flush_dcache_mmap_lock(mapping)
|
||||||
#define flush_dcache_mmap_unlock(mapping)
|
#define flush_dcache_mmap_unlock(mapping)
|
||||||
|
@ -25,6 +25,7 @@
|
|||||||
#define flush_cache_vmap(start, end) do { } while (0)
|
#define flush_cache_vmap(start, end) do { } while (0)
|
||||||
#define flush_cache_vunmap(start, end) do { } while (0)
|
#define flush_cache_vunmap(start, end) do { } while (0)
|
||||||
|
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
#define flush_dcache_page(page) \
|
#define flush_dcache_page(page) \
|
||||||
do { \
|
do { \
|
||||||
clear_bit(PG_arch_1, &(page)->flags); \
|
clear_bit(PG_arch_1, &(page)->flags); \
|
||||||
|
@ -12,6 +12,7 @@ extern void _flush_cache_copyback_all(void);
|
|||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
@ -33,6 +34,7 @@ extern void smp_flush_cache_all(void);
|
|||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
@ -46,6 +48,7 @@ extern void smp_flush_cache_all(void);
|
|||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -128,6 +128,7 @@ static inline void __flush_page_to_ram(void *vaddr)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
|
#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -12,6 +12,7 @@
|
|||||||
#define flush_cache_range(vma, start, end) __flush_cache_all()
|
#define flush_cache_range(vma, start, end) __flush_cache_all()
|
||||||
#define flush_cache_page(vma, vmaddr) do { } while (0)
|
#define flush_cache_page(vma, vmaddr) do { } while (0)
|
||||||
#define flush_dcache_range(start,len) __flush_cache_all()
|
#define flush_dcache_range(start,len) __flush_cache_all()
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -37,6 +37,7 @@
|
|||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||||
|
|
||||||
#define flush_dcache_range(start, end) __invalidate_dcache_range(start, end)
|
#define flush_dcache_range(start, end) __invalidate_dcache_range(start, end)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -38,6 +38,7 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
|
|||||||
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
|
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
|
||||||
extern void __flush_dcache_page(struct page *page);
|
extern void __flush_dcache_page(struct page *page);
|
||||||
|
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
static inline void flush_dcache_page(struct page *page)
|
static inline void flush_dcache_page(struct page *page)
|
||||||
{
|
{
|
||||||
if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
|
if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
|
||||||
|
@ -26,6 +26,7 @@
|
|||||||
#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do {} while (0)
|
||||||
#define flush_cache_vmap(start, end) do {} while (0)
|
#define flush_cache_vmap(start, end) do {} while (0)
|
||||||
#define flush_cache_vunmap(start, end) do {} while (0)
|
#define flush_cache_vunmap(start, end) do {} while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do {} while (0)
|
#define flush_dcache_page(page) do {} while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do {} while (0)
|
#define flush_dcache_mmap_lock(mapping) do {} while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
|
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
|
||||||
|
@ -42,6 +42,7 @@ void flush_cache_mm(struct mm_struct *mm);
|
|||||||
#define flush_cache_vmap(start, end) flush_cache_all()
|
#define flush_cache_vmap(start, end) flush_cache_all()
|
||||||
#define flush_cache_vunmap(start, end) flush_cache_all()
|
#define flush_cache_vunmap(start, end) flush_cache_all()
|
||||||
|
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
extern void flush_dcache_page(struct page *page);
|
extern void flush_dcache_page(struct page *page);
|
||||||
|
|
||||||
#define flush_dcache_mmap_lock(mapping) \
|
#define flush_dcache_mmap_lock(mapping) \
|
||||||
|
@ -25,6 +25,7 @@
|
|||||||
#define flush_cache_vmap(start, end) do { } while (0)
|
#define flush_cache_vmap(start, end) do { } while (0)
|
||||||
#define flush_cache_vunmap(start, end) do { } while (0)
|
#define flush_cache_vunmap(start, end) do { } while (0)
|
||||||
|
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
extern void flush_dcache_page(struct page *page);
|
extern void flush_dcache_page(struct page *page);
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -10,6 +10,7 @@
|
|||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -16,6 +16,7 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
|
|||||||
extern void flush_dcache_range(unsigned long start, unsigned long end);
|
extern void flush_dcache_range(unsigned long start, unsigned long end);
|
||||||
|
|
||||||
#define flush_cache_dup_mm(mm) do {} while (0)
|
#define flush_cache_dup_mm(mm) do {} while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do {} while (0)
|
#define flush_dcache_page(page) do {} while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do {} while (0)
|
#define flush_dcache_mmap_lock(mapping) do {} while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
|
#define flush_dcache_mmap_unlock(mapping) do {} while (0)
|
||||||
|
@ -42,6 +42,7 @@ extern void flush_cache_page(struct vm_area_struct *vma,
|
|||||||
unsigned long addr, unsigned long pfn);
|
unsigned long addr, unsigned long pfn);
|
||||||
extern void flush_cache_range(struct vm_area_struct *vma,
|
extern void flush_cache_range(struct vm_area_struct *vma,
|
||||||
unsigned long start, unsigned long end);
|
unsigned long start, unsigned long end);
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
extern void flush_dcache_page(struct page *page);
|
extern void flush_dcache_page(struct page *page);
|
||||||
extern void flush_icache_range(unsigned long start, unsigned long end);
|
extern void flush_icache_range(unsigned long start, unsigned long end);
|
||||||
extern void flush_icache_page(struct vm_area_struct *vma,
|
extern void flush_icache_page(struct vm_area_struct *vma,
|
||||||
|
@ -75,6 +75,7 @@ BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
|
|||||||
|
|
||||||
extern void sparc_flush_page_to_ram(struct page *page);
|
extern void sparc_flush_page_to_ram(struct page *page);
|
||||||
|
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
|
#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -37,6 +37,7 @@ extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
extern void __flush_dcache_range(unsigned long start, unsigned long end);
|
extern void __flush_dcache_range(unsigned long start, unsigned long end);
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
extern void flush_dcache_page(struct page *page);
|
extern void flush_dcache_page(struct page *page);
|
||||||
|
|
||||||
#define flush_icache_page(vma, pg) do { } while(0)
|
#define flush_icache_page(vma, pg) do { } while(0)
|
||||||
|
@ -12,6 +12,7 @@ static inline void flush_cache_range(struct vm_area_struct *vma,
|
|||||||
unsigned long start, unsigned long end) { }
|
unsigned long start, unsigned long end) { }
|
||||||
static inline void flush_cache_page(struct vm_area_struct *vma,
|
static inline void flush_cache_page(struct vm_area_struct *vma,
|
||||||
unsigned long vmaddr, unsigned long pfn) { }
|
unsigned long vmaddr, unsigned long pfn) { }
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
static inline void flush_dcache_page(struct page *page) { }
|
static inline void flush_dcache_page(struct page *page) { }
|
||||||
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
|
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
|
||||||
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
|
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
|
||||||
|
@ -101,6 +101,7 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
|
|||||||
#define flush_cache_vmap(start,end) flush_cache_all()
|
#define flush_cache_vmap(start,end) flush_cache_all()
|
||||||
#define flush_cache_vunmap(start,end) flush_cache_all()
|
#define flush_cache_vunmap(start,end) flush_cache_all()
|
||||||
|
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||||
extern void flush_dcache_page(struct page*);
|
extern void flush_dcache_page(struct page*);
|
||||||
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
|
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
|
||||||
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
|
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
|
||||||
|
@ -2358,6 +2358,25 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
|
|||||||
rq->rq_disk = bio->bi_bdev->bd_disk;
|
rq->rq_disk = bio->bi_bdev->bd_disk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
||||||
|
/**
|
||||||
|
* rq_flush_dcache_pages - Helper function to flush all pages in a request
|
||||||
|
* @rq: the request to be flushed
|
||||||
|
*
|
||||||
|
* Description:
|
||||||
|
* Flush all pages in @rq.
|
||||||
|
*/
|
||||||
|
void rq_flush_dcache_pages(struct request *rq)
|
||||||
|
{
|
||||||
|
struct req_iterator iter;
|
||||||
|
struct bio_vec *bvec;
|
||||||
|
|
||||||
|
rq_for_each_segment(bvec, rq, iter)
|
||||||
|
flush_dcache_page(bvec->bv_page);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
|
||||||
|
#endif
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* blk_lld_busy - Check if underlying low-level drivers of a device are busy
|
* blk_lld_busy - Check if underlying low-level drivers of a device are busy
|
||||||
* @q : the queue of the device being checked
|
* @q : the queue of the device being checked
|
||||||
|
@ -59,12 +59,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
|
|||||||
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
|
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
|
||||||
if (tr->readsect(dev, block, buf))
|
if (tr->readsect(dev, block, buf))
|
||||||
return -EIO;
|
return -EIO;
|
||||||
|
rq_flush_dcache_pages(req);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
case WRITE:
|
case WRITE:
|
||||||
if (!tr->writesect)
|
if (!tr->writesect)
|
||||||
return -EIO;
|
return -EIO;
|
||||||
|
|
||||||
|
rq_flush_dcache_pages(req);
|
||||||
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
|
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
|
||||||
if (tr->writesect(dev, block, buf))
|
if (tr->writesect(dev, block, buf))
|
||||||
return -EIO;
|
return -EIO;
|
||||||
|
12
fs/bio.c
12
fs/bio.c
@ -1393,6 +1393,18 @@ void bio_check_pages_dirty(struct bio *bio)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
||||||
|
void bio_flush_dcache_pages(struct bio *bi)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
struct bio_vec *bvec;
|
||||||
|
|
||||||
|
bio_for_each_segment(bvec, bi, i)
|
||||||
|
flush_dcache_page(bvec->bv_page);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(bio_flush_dcache_pages);
|
||||||
|
#endif
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* bio_endio - end I/O on a bio
|
* bio_endio - end I/O on a bio
|
||||||
* @bio: bio
|
* @bio: bio
|
||||||
|
@ -13,6 +13,7 @@
|
|||||||
#define flush_cache_dup_mm(mm) do { } while (0)
|
#define flush_cache_dup_mm(mm) do { } while (0)
|
||||||
#define flush_cache_range(vma, start, end) do { } while (0)
|
#define flush_cache_range(vma, start, end) do { } while (0)
|
||||||
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
|
||||||
|
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||||
#define flush_dcache_page(page) do { } while (0)
|
#define flush_dcache_page(page) do { } while (0)
|
||||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||||
|
@ -391,6 +391,18 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
|
|||||||
gfp_t, int);
|
gfp_t, int);
|
||||||
extern void bio_set_pages_dirty(struct bio *bio);
|
extern void bio_set_pages_dirty(struct bio *bio);
|
||||||
extern void bio_check_pages_dirty(struct bio *bio);
|
extern void bio_check_pages_dirty(struct bio *bio);
|
||||||
|
|
||||||
|
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
||||||
|
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
|
||||||
|
#endif
|
||||||
|
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
||||||
|
extern void bio_flush_dcache_pages(struct bio *bi);
|
||||||
|
#else
|
||||||
|
static inline void bio_flush_dcache_pages(struct bio *bi)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
|
extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
|
||||||
unsigned long, unsigned int, int, gfp_t);
|
unsigned long, unsigned int, int, gfp_t);
|
||||||
extern struct bio *bio_copy_user_iov(struct request_queue *,
|
extern struct bio *bio_copy_user_iov(struct request_queue *,
|
||||||
|
@ -752,6 +752,17 @@ struct req_iterator {
|
|||||||
#define rq_iter_last(rq, _iter) \
|
#define rq_iter_last(rq, _iter) \
|
||||||
(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
|
(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
|
||||||
|
|
||||||
|
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
||||||
|
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
|
||||||
|
#endif
|
||||||
|
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
|
||||||
|
extern void rq_flush_dcache_pages(struct request *rq);
|
||||||
|
#else
|
||||||
|
static inline void rq_flush_dcache_pages(struct request *rq)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
extern int blk_register_queue(struct gendisk *disk);
|
extern int blk_register_queue(struct gendisk *disk);
|
||||||
extern void blk_unregister_queue(struct gendisk *disk);
|
extern void blk_unregister_queue(struct gendisk *disk);
|
||||||
extern void register_disk(struct gendisk *dev);
|
extern void register_disk(struct gendisk *dev);
|
||||||
|
Loading…
Reference in New Issue
Block a user