commit 09cbfeaf1a
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the
page cache with bigger chunks than PAGE_SIZE. This promise never
materialized, and it is unlikely to. We have many places where
PAGE_CACHE_SIZE is assumed to be equal to PAGE_SIZE, and it is a
constant source of confusion whether the PAGE_CACHE_* or the PAGE_*
constant should be used in a particular case, especially on the border
between fs and mm.

Switching globally to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.

Let's stop pretending that pages in the page cache are special. They
are not.

The changes are pretty straightforward:

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();

This patch contains automated changes generated with coccinelle using
the script below. For some reason, coccinelle doesn't patch header
files; I've called spatch for them manually.

The only adjustment after coccinelle is a revert of the changes to the
PAGE_CACHE_ALIGN definition: we are going to drop it later.

There are a few places in the code that coccinelle didn't reach; I'll
fix them manually in a separate patch. Comments and documentation will
also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
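To illustrate how mechanical the conversion is, here is a small
before/after sketch (hypothetical code, not taken from this patch)
showing the rules above applied to a typical page-cache sequence:

	/* Before: */
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	page_cache_get(page);
	memset(page_address(page), 0, PAGE_CACHE_SIZE);
	page_cache_release(page);

	/* After: */
	pgoff_t index = pos >> PAGE_SHIFT;
	get_page(page);
	memset(page_address(page), 0, PAGE_SIZE);
	put_page(page);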
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted files.
 * It has some limitations (see below), where it will fall back to
 * block_read_full_page(), but these limitations should only be hit
 * when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

/*
 * Call ext4_decrypt on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	struct ext4_crypto_ctx *ctx =
		container_of(work, struct ext4_crypto_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		int ret = ext4_decrypt(page);
		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else
			SetPageUptodate(page);
		unlock_page(page);
	}
	ext4_release_crypto_ctx(ctx);
	bio_put(bio);
#else
	BUG();
#endif
}

static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	return unlikely(bio->bi_private != NULL);
#else
	return false;
#endif
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this? If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard. See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (ext4_bio_encrypted(bio)) {
		struct ext4_crypto_ctx *ctx = bio->bi_private;

		if (bio->bi_error) {
			ext4_release_crypto_ctx(ctx);
		} else {
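			/*
			 * Decryption cannot run here in bio completion
			 * context, so hand the bio off to the read
			 * workqueue; completion_pages() will decrypt,
			 * mark and unlock the pages from there.
			 */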
			INIT_WORK(&ctx->r.work, completion_pages);
			ctx->r.bio = bio;
			queue_work(ext4_read_workqueue, &ctx->r.work);
			return;
		}
	}
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
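	/* e.g. with 4K pages and a 1K block size, blocks_per_page is 4 */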
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
		if (pages) {
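			/*
			 * Readpages case: the pages arrive on the list in
			 * reverse file order, so pages->prev is the page
			 * with the lowest index.
			 */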
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  mapping_gfp_constraint(mapping, GFP_KERNEL)))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;
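
		/*
		 * Work out, in units of the file's block size, the range
		 * of blocks this page covers and where the file's last
		 * block is.
		 */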
		block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused; /* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
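			/*
			 * cleancache already held this page's data; mark
			 * it uptodate and take the "confused" path below,
			 * which will see an uptodate page and simply
			 * unlock it.
			 */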
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct ext4_crypto_ctx *ctx = NULL;

			if (ext4_encrypted_inode(inode) &&
			    S_ISREG(inode->i_mode)) {
				ctx = ext4_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					ext4_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
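			/*
			 * blocks[] is in fs-block units; bi_sector wants
			 * 512-byte sectors, hence the shift by blkbits - 9.
			 */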
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
		}
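
		/*
		 * Only read up to the first hole: any tail of the page
		 * past it was already zeroed above and needs no I/O.
		 */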
		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(READ, bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}
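
/*
 * Call-site sketch (assumed, not part of this file): ext4's ->readpage
 * and ->readpages address_space operations in fs/ext4/inode.c are
 * expected to funnel into this helper roughly as follows:
 *
 *	->readpage:  ext4_mpage_readpages(page->mapping, NULL, page, 1);
 *	->readpages: ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
 *
 * i.e. either a single already-locked page, or a list of not-yet-added
 * readahead pages, but never both at once.
 */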