mirror of https://github.com/torvalds/linux.git (synced 2024-11-27)
commit 7988613b0e
More prep work for immutable biovecs - with immutable bvecs drivers won't be able to use the biovec directly, they'll need to use helpers that take into account bio->bi_iter.bi_bvec_done.

This updates callers for the new usage without changing the implementation yet.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>
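
For context, a minimal sketch (not part of this commit) of the calling-convention change the message describes. bio_for_each_segment(), struct bvec_iter, and bio->bi_iter are real block-layer interfaces of this era; queue_segment() and the surrounding driver loop are hypothetical, for illustration only.

/* Before the series: drivers walked bi_io_vec directly; the macro handed
 * back a bio_vec pointer and an integer index. */
struct bio_vec *bvp;
int idx;

bio_for_each_segment(bvp, bio, idx)
        queue_segment(bvp->bv_page, bvp->bv_offset, bvp->bv_len); /* hypothetical */

/* After the series: the macro fills in a bio_vec by value through a
 * struct bvec_iter, which starts from bio->bi_iter and accounts for
 * bi_bvec_done, so partially-completed bios present correctly offset
 * segments. */
struct bio_vec bv;
struct bvec_iter iter;

bio_for_each_segment(bv, bio, iter)
        queue_segment(bv.bv_page, bv.bv_offset, bv.bv_len); /* hypothetical */
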
288 lines · 6.4 KiB · C

/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE 64
#define ISA_POOL_SIZE 16

static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
        if (max_pfn <= max_low_pfn)
                return 0;
#endif

        page_pool = mempool_create_page_pool(POOL_SIZE, 0);
        BUG_ON(!page_pool);
        printk("bounce pool size: %d pages\n", POOL_SIZE);

        return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
        unsigned long flags;
        unsigned char *vto;

        local_irq_save(flags);
        vto = kmap_atomic(to->bv_page);
        memcpy(vto + to->bv_offset, vfrom, to->bv_len);
        kunmap_atomic(vto);
        local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom) \
        memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
        return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
        if (isa_page_pool)
                return 0;

        isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
                                       mempool_free_pages, (void *) 0);
        BUG_ON(!isa_page_pool);

        printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
        return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
        unsigned char *vfrom;
        struct bio_vec tovec, *fromvec = from->bi_io_vec;
        struct bvec_iter iter;

        bio_for_each_segment(tovec, to, iter) {
                if (tovec.bv_page != fromvec->bv_page) {
                        /*
                         * fromvec->bv_offset and fromvec->bv_len might have
                         * been modified by the block layer, so use the original
                         * copy, bounce_copy_vec already uses tovec->bv_len
                         */
                        vfrom = page_address(fromvec->bv_page) +
                                tovec.bv_offset;

                        bounce_copy_vec(&tovec, vfrom);
                        flush_dcache_page(tovec.bv_page);
                }

                fromvec++;
        }
}

static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
        struct bio *bio_orig = bio->bi_private;
        struct bio_vec *bvec, *org_vec;
        int i;

        if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
                set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

        /*
         * free up bounce indirect pages used
         */
        bio_for_each_segment_all(bvec, bio, i) {
                org_vec = bio_orig->bi_io_vec + i;
                if (bvec->bv_page == org_vec->bv_page)
                        continue;

                dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
                mempool_free(bvec->bv_page, pool);
        }

        bio_endio(bio_orig, err);
        bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio, int err)
{
        bounce_end_io(bio, page_pool, err);
}

static void bounce_end_io_write_isa(struct bio *bio, int err)
{
        bounce_end_io(bio, isa_page_pool, err);
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
        struct bio *bio_orig = bio->bi_private;

        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                copy_to_high_bio_irq(bio_orig, bio);

        bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
        __bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
        __bounce_end_io_read(bio, isa_page_pool, err);
}

#ifdef CONFIG_NEED_BOUNCE_POOL
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
        if (bio_data_dir(bio) != WRITE)
                return 0;

        if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
                return 0;

        return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
}
#else
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
        return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */

static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
                               mempool_t *pool, int force)
{
        struct bio *bio;
        int rw = bio_data_dir(*bio_orig);
        struct bio_vec *to, from;
        struct bvec_iter iter;
        unsigned i;

        if (force)
                goto bounce;
        bio_for_each_segment(from, *bio_orig, iter)
                if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
                        goto bounce;

        return;
bounce:
        bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

        bio_for_each_segment_all(to, bio, i) {
                struct page *page = to->bv_page;

                if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
                        continue;

                inc_zone_page_state(to->bv_page, NR_BOUNCE);
                to->bv_page = mempool_alloc(pool, q->bounce_gfp);

                if (rw == WRITE) {
                        char *vto, *vfrom;

                        flush_dcache_page(page);

                        vto = page_address(to->bv_page) + to->bv_offset;
                        vfrom = kmap_atomic(page) + to->bv_offset;
                        memcpy(vto, vfrom, to->bv_len);
                        kunmap_atomic(vfrom);
                }
        }

        trace_block_bio_bounce(q, *bio_orig);

        bio->bi_flags |= (1 << BIO_BOUNCED);

        if (pool == page_pool) {
                bio->bi_end_io = bounce_end_io_write;
                if (rw == READ)
                        bio->bi_end_io = bounce_end_io_read;
        } else {
                bio->bi_end_io = bounce_end_io_write_isa;
                if (rw == READ)
                        bio->bi_end_io = bounce_end_io_read_isa;
        }

        bio->bi_private = *bio_orig;
        *bio_orig = bio;
}

void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
        int must_bounce;
        mempool_t *pool;

        /*
         * Data-less bio, nothing to bounce
         */
        if (!bio_has_data(*bio_orig))
                return;

        must_bounce = must_snapshot_stable_pages(q, *bio_orig);

        /*
         * for non-isa bounce case, just check if the bounce pfn is equal
         * to or bigger than the highest pfn in the system -- in that case,
         * don't waste time iterating over bio segments
         */
        if (!(q->bounce_gfp & GFP_DMA)) {
                if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
                        return;
                pool = page_pool;
        } else {
                BUG_ON(!isa_page_pool);
                pool = isa_page_pool;
        }

        /*
         * slow path
         */
        __blk_queue_bounce(q, bio_orig, pool, must_bounce);
}

EXPORT_SYMBOL(blk_queue_bounce);
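
For illustration, a hedged sketch of the call-site shape: blk_queue_bounce() is invoked on the bio submission path and may swap in the bounced clone, so callers must continue with the possibly-updated bio pointer. The function below is a simplified, hypothetical stand-in for the real submission code, not the kernel's actual caller.

/* Hypothetical, simplified submission step. */
static void submit_prepared_bio(struct request_queue *q, struct bio *bio)
{
        /*
         * If any segment sits above the queue's bounce pfn (or a
         * stable-page snapshot is required), bio is replaced with a
         * clone backed by pool pages; the bounce end_io handlers copy
         * data back and free the pool pages on completion.
         */
        blk_queue_bounce(q, &bio);

        /* ... hand the (possibly bounced) bio to the request layer ... */
}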