gcc-4.6: mm: fix unused but set warnings
No real bugs, just some dead code and some fixups.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 627295e492
commit 4e60c86bd9
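Background for the diff below: gcc 4.6 added -Wunused-but-set-variable (enabled by -Wall), which warns when a local variable is assigned but its value is never read afterwards. A minimal, hypothetical reproduction of the diagnostic this commit silences (not kernel code, names are made up):

/* warn.c -- illustrative only; build with: gcc -Wall -O2 -c warn.c (gcc >= 4.6) */
unsigned long split_pos(unsigned long pos)
{
	unsigned long index;		/* only ever written */
	unsigned long offset;

	index = pos >> 12;		/* warning: variable 'index' set but not used */
	offset = pos & 4095;		/* written and then read back: no warning */

	return offset;
}

The hunks below quiet such warnings in two ways: dead assignments with no side effects are deleted outright (generic_perform_write(), free_pgd_range()), and no-op macros gain a (void) cast or become inline functions so their arguments still count as a use.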
@@ -126,8 +126,8 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
 #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
-#define pte_unmap(pte) /* NOP */
-#define pte_unmap_nested(pte) /* NOP */
+#define pte_unmap(pte) ((void)(pte))/* NOP */
+#define pte_unmap_nested(pte) ((void)(pte)) /* NOP */
 
 #define update_mmu_cache(vma, address, ptep) do { } while (0)
 
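To sketch why the pte_unmap() change is needed (the caller below is hypothetical; only the two macro bodies mirror the hunk above): on x86-64 unmapping a PTE is a no-op, so a caller whose only remaining reference to its local pte pointer is the unmap call ends up with a set-but-unused variable unless the macro consumes its argument.

/* Illustration only; trimmed-down stand-ins for the real helpers. */
typedef struct { unsigned long val; } pte_t;

#define pte_unmap_old(pte)	/* NOP */
#define pte_unmap_new(pte)	((void)(pte))

static pte_t dummy_pte;

static pte_t *pte_offset_map_stub(unsigned long address)
{
	(void)address;
	return &dummy_pte;		/* stand-in for the real page-table lookup */
}

void walk_old(unsigned long address)
{
	pte_t *pte;

	pte = pte_offset_map_stub(address);
	pte_unmap_old(pte);		/* expands to nothing: 'pte' set but not used */
}

void walk_new(unsigned long address)
{
	pte_t *pte;

	pte = pte_offset_map_stub(address);
	pte_unmap_new(pte);		/* expands to ((void)(pte)): 'pte' counts as used */
}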
@@ -73,7 +73,11 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
 }
 #define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx)
 
-#define kunmap_atomic_notypecheck(addr, idx) do { pagefault_enable(); } while (0)
+static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx)
+{
+	pagefault_enable();
+}
+
 #define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx))
 #define kmap_atomic_to_page(ptr) virt_to_page(ptr)
 
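The highmem change above takes a different route: the no-op macro becomes a static inline. A rough sketch of the effect, with stand-in types and a stubbed pagefault_enable() (the real kernel definitions differ): an argument passed to a real function always counts as used, and the compiler now also checks that addr is a pointer and idx an enum km_type, both of which the old macro silently discarded.

/* Illustration only; minimal stand-ins, not the kernel definitions. */
enum km_type { KM_USER0, KM_USER1 };

static void pagefault_enable_stub(void) { }

/* Old shape: both arguments vanish in the expansion. */
#define kunmap_atomic_old(addr, idx)	do { pagefault_enable_stub(); } while (0)

/* New shape (as in the hunk above): parameters are genuinely consumed
 * and type-checked at every call site. */
static inline void kunmap_atomic_new(void *addr, enum km_type idx)
{
	pagefault_enable_stub();
}

void user_old(void)
{
	char *addr;

	addr = (char *)4096;			/* pretend this came from kmap_atomic() */
	kunmap_atomic_old(addr, KM_USER0);	/* 'addr' set but not used */
}

void user_new(void)
{
	char *addr;

	addr = (char *)4096;
	kunmap_atomic_new(addr, KM_USER0);	/* 'addr' is passed on, so it is used */
}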
@@ -4,7 +4,7 @@
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
 #else
-#define VM_BUG_ON(cond) do { } while (0)
+#define VM_BUG_ON(cond) do { (void)(cond); } while (0)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
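For the VM_BUG_ON() stub, a condensed sketch of the same idea (the caller and the page_mapcount stand-in are hypothetical): with CONFIG_DEBUG_VM off, the old stub threw its condition away, so a value computed only to be checked looked dead; the new stub evaluates the condition through a (void) cast, marking its inputs as used while still emitting no check.

/* Illustration only: trimmed-down stubs, not the kernel macros. */
#define VM_BUG_ON_OLD(cond)	do { } while (0)
#define VM_BUG_ON_NEW(cond)	do { (void)(cond); } while (0)

static int page_mapcount_stub(const void *page)	/* hypothetical helper */
{
	(void)page;
	return 0;
}

void check_old(const void *page)
{
	int mapcount;

	mapcount = page_mapcount_stub(page);
	VM_BUG_ON_OLD(mapcount < 0);	/* condition discarded: 'mapcount' set but not used */
}

void check_new(const void *page)
{
	int mapcount;

	mapcount = page_mapcount_stub(page);
	VM_BUG_ON_NEW(mapcount < 0);	/* condition evaluated, result ignored: no warning */
}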
@@ -2238,14 +2238,12 @@ static ssize_t generic_perform_write(struct file *file,
 
 	do {
 		struct page *page;
-		pgoff_t index;		/* Pagecache index for current page */
 		unsigned long offset;	/* Offset into pagecache page */
 		unsigned long bytes;	/* Bytes to write to page */
 		size_t copied;		/* Bytes copied from user */
 		void *fsdata;
 
 		offset = (pos & (PAGE_CACHE_SIZE - 1));
-		index = pos >> PAGE_CACHE_SHIFT;
 		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
 						iov_iter_count(i));
 
@@ -307,7 +307,6 @@ void free_pgd_range(struct mmu_gather *tlb,
 {
 	pgd_t *pgd;
 	unsigned long next;
-	unsigned long start;
 
 	/*
 	 * The next few lines have given us lots of grief...
@@ -351,7 +350,6 @@ void free_pgd_range(struct mmu_gather *tlb,
 	if (addr > end - 1)
 		return;
 
-	start = addr;
 	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -394,7 +394,7 @@ static void kmem_list3_init(struct kmem_list3 *parent)
 #define STATS_DEC_ACTIVE(x) do { } while (0)
 #define STATS_INC_ALLOCED(x) do { } while (0)
 #define STATS_INC_GROWN(x) do { } while (0)
-#define STATS_ADD_REAPED(x,y) do { } while (0)
+#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0)
 #define STATS_SET_HIGH(x) do { } while (0)
 #define STATS_INC_ERR(x) do { } while (0)
 #define STATS_INC_NODEALLOCS(x) do { } while (0)
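A reader might wonder why only the second argument of STATS_ADD_REAPED(x,y) gets the cast. A hypothetical caller, modeled loosely on how such a stats hook is used (not the actual slab code), shows the reason: the reaped count is typically a local computed solely to feed the hook, while the first argument is used elsewhere anyway.

/* Illustration only: why only 'y' needs consuming in the !STATS stub. */
struct kmem_cache_stub { int dummy; };			/* hypothetical type */

#define STATS_ADD_REAPED_STUB(x, y)	do { (void)(y); } while (0)

static int drain_freelist_stub(struct kmem_cache_stub *c)
{
	(void)c;
	return 0;
}

void cache_reap_stub(struct kmem_cache_stub *searchp)
{
	int freed;

	freed = drain_freelist_stub(searchp);	/* computed only to feed the stats hook */
	STATS_ADD_REAPED_STUB(searchp, freed);	/* (void)(y) keeps 'freed' "used";
						 * 'searchp' is already used above,
						 * so 'x' needs no cast */
}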