From 367636772f094fd840d2d79e75257bcfaa28e70f Mon Sep 17 00:00:00 2001
From: Anton Altaparmakov
Date: Thu, 18 Nov 2004 13:46:45 +0000
Subject: [PATCH] NTFS: - In fs/ntfs/compress.c, use i_size_read() at the start
 and then use the cached value everywhere. Cache the initialized_size in the
 same way and protect the critical region where the two sizes are read using
 the new size_lock of the ntfs inode.
 - Add the new size_lock to the ntfs_inode structure (fs/ntfs/inode.h) and
   initialize it (fs/ntfs/inode.c).

Signed-off-by: Anton Altaparmakov
---
 fs/ntfs/ChangeLog  |  7 +++++++
 fs/ntfs/compress.c | 46 ++++++++++++++++++++++++++++------------------
 fs/ntfs/inode.c    |  1 +
 fs/ntfs/inode.h    |  1 +
 4 files changed, 37 insertions(+), 18 deletions(-)

diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog
index 6d2a99c13d30..97c4fe932046 100644
--- a/fs/ntfs/ChangeLog
+++ b/fs/ntfs/ChangeLog
@@ -34,6 +34,13 @@ ToDo/Notes:
 	- Use i_size_read() once and then use the cached value in
 	  fs/ntfs/lcnalloc.c::ntfs_cluster_alloc().
 	- Use i_size_read() in fs/ntfs/file.c::ntfs_file_open().
+	- Add size_lock to the ntfs_inode structure.  This is an rw spinlock
+	  and it locks against access to the inode sizes.  Note, ->size_lock
+	  is also accessed from irq context so you must use the _irqsave and
+	  _irqrestore lock and unlock functions, respectively.
+	- Use i_size_read() in fs/ntfs/compress.c at the start of the read and
+	  use the cached value afterwards.  Cache the initialized_size in the
+	  same way and protect access to the two sizes using the size_lock.
 
 2.1.22 - Many bug and race fixes and error handling improvements.
 
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index ee5ae706f861..6d265cfd49aa 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -96,13 +96,14 @@ void free_compression_buffers(void)
 /**
  * zero_partial_compressed_page - zero out of bounds compressed page region
  */
-static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+static void zero_partial_compressed_page(struct page *page,
+		const s64 initialized_size)
 {
 	u8 *kp = page_address(page);
 	unsigned int kp_ofs;
 
 	ntfs_debug("Zeroing page region outside initialized size.");
-	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
 		/*
 		 * FIXME: Using clear_page() will become wrong when we get
 		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
@@ -110,7 +111,7 @@ static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
 		clear_page(kp);
 		return;
 	}
-	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
 	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
 	return;
 }
@@ -118,12 +119,12 @@ static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
 /**
  * handle_bounds_compressed_page - test for&handle out of bounds compressed page
  */
-static inline void handle_bounds_compressed_page(ntfs_inode *ni,
-		struct page *page)
+static inline void handle_bounds_compressed_page(struct page *page,
+		const loff_t i_size, const s64 initialized_size)
 {
-	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
-			(ni->initialized_size < VFS_I(ni)->i_size))
-		zero_partial_compressed_page(ni, page);
+	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+			(initialized_size < i_size))
+		zero_partial_compressed_page(page, initialized_size);
 	return;
 }
 
@@ -138,6 +139,8 @@ static inline void handle_bounds_compressed_page(ntfs_inode *ni,
 * @xpage_done:	set to 1 if xpage was completed successfully (IN/OUT)
 * @cb_start:	compression block to decompress (IN)
 * @cb_size:	size of compression block @cb_start in bytes (IN)
+ * @i_size:	file size when we started the read (IN)
+ * @initialized_size: initialized file size when we started the read (IN)
 *
 * The caller must have disabled preemption. ntfs_decompress() reenables it when
 * the critical section is finished.
@@ -165,7 +168,8 @@ static inline void handle_bounds_compressed_page(ntfs_inode *ni,
 static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
 		const int xpage, char *xpage_done, u8 *const cb_start,
-		const u32 cb_size)
+		const u32 cb_size, const loff_t i_size,
+		const s64 initialized_size)
 {
 	/*
 	 * Pointers into the compressed data, i.e. the compression block (cb),
@@ -219,9 +223,6 @@ return_error:
 	spin_unlock(&ntfs_cb_lock);
 	/* Second stage: finalize completed pages. */
 	if (nr_completed_pages > 0) {
-		struct page *page = dest_pages[completed_pages[0]];
-		ntfs_inode *ni = NTFS_I(page->mapping->host);
-
 		for (i = 0; i < nr_completed_pages; i++) {
 			int di = completed_pages[i];
 			struct page *dp = dest_pages[di];
@@ -230,7 +231,8 @@ return_error:
 			 * If we are outside the initialized size, zero
 			 * the out of bounds page range.
 			 */
-			handle_bounds_compressed_page(ni, dp);
+			handle_bounds_compressed_page(dp, i_size,
+					initialized_size);
 			flush_dcache_page(dp);
 			kunmap(dp);
 			SetPageUptodate(dp);
@@ -478,12 +480,14 @@ return_overflow:
  */
 int ntfs_read_compressed_block(struct page *page)
 {
+	loff_t i_size;
+	s64 initialized_size;
 	struct address_space *mapping = page->mapping;
 	ntfs_inode *ni = NTFS_I(mapping->host);
 	ntfs_volume *vol = ni->vol;
 	struct super_block *sb = vol->sb;
 	runlist_element *rl;
-	unsigned long block_size = sb->s_blocksize;
+	unsigned long flags, block_size = sb->s_blocksize;
 	unsigned char block_size_bits = sb->s_blocksize_bits;
 	u8 *cb, *cb_pos, *cb_end;
 	struct buffer_head **bhs;
@@ -552,8 +556,12 @@ int ntfs_read_compressed_block(struct page *page)
 	 * The remaining pages need to be allocated and inserted into the page
 	 * cache, alignment guarantees keep all the below much simpler.  (-8
 	 */
-	max_page = ((VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT) - offset;
+	read_lock_irqsave(&ni->size_lock, flags);
+	i_size = i_size_read(VFS_I(ni));
+	initialized_size = ni->initialized_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
+	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+			offset;
 	if (nr_pages < max_page)
 		max_page = nr_pages;
 	for (i = 0; i < max_page; i++, offset++) {
@@ -824,7 +832,8 @@ lock_retry_remap:
 			 * If we are outside the initialized size, zero
 			 * the out of bounds page range.
 			 */
-			handle_bounds_compressed_page(ni, page);
+			handle_bounds_compressed_page(page, i_size,
+					initialized_size);
 			flush_dcache_page(page);
 			kunmap(page);
 			SetPageUptodate(page);
@@ -847,7 +856,8 @@ lock_retry_remap:
 		ntfs_debug("Found compressed compression block.");
 		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
 				cb_max_page, cb_max_ofs, xpage, &xpage_done,
-				cb_pos, cb_size - (cb_pos - cb));
+				cb_pos, cb_size - (cb_pos - cb), i_size,
+				initialized_size);
 		/*
 		 * We can sleep from now on, lock already dropped by
 		 * ntfs_decompress().
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 31840ba0b38c..a02d8d9f0439 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -376,6 +376,7 @@ static void ntfs_destroy_extent_inode(ntfs_inode *ni)
 void __ntfs_init_inode(struct super_block *sb, ntfs_inode *ni)
 {
 	ntfs_debug("Entering.");
+	rwlock_init(&ni->size_lock);
 	ni->initialized_size = ni->allocated_size = 0;
 	ni->seq_no = 0;
 	atomic_set(&ni->count, 1);
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h
index 99580455f2ed..56b2d6ec33f7 100644
--- a/fs/ntfs/inode.h
+++ b/fs/ntfs/inode.h
@@ -44,6 +44,7 @@ typedef struct _ntfs_inode ntfs_inode;
  * fields already provided in the VFS inode.
  */
 struct _ntfs_inode {
+	rwlock_t size_lock;	/* Lock serializing access to inode sizes. */
 	s64 initialized_size;	/* Copy from the attribute record. */
 	s64 allocated_size;	/* Copy from the attribute record. */
 	unsigned long state;	/* NTFS specific flags describing this inode.
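
Usage note: the locking rule stated in the ChangeLog entry (readers of the inode
sizes must take ->size_lock with the _irqsave/_irqrestore variants, since the
lock is also taken from irq context) reduces to the pattern below.  This is a
minimal sketch distilled from the ntfs_read_compressed_block() hunk above; the
helper name ntfs_snapshot_sizes() is illustrative only and is not part of the
patch.

	/*
	 * Hypothetical helper (not in the patch): take a consistent snapshot
	 * of i_size and initialized_size.  ->size_lock is also acquired from
	 * irq context, so the _irqsave/_irqrestore variants are mandatory to
	 * avoid a deadlock against an interrupt taking the lock on this cpu.
	 */
	static inline s64 ntfs_snapshot_sizes(ntfs_inode *ni, loff_t *i_size)
	{
		unsigned long flags;
		s64 initialized_size;

		read_lock_irqsave(&ni->size_lock, flags);
		*i_size = i_size_read(VFS_I(ni));
		initialized_size = ni->initialized_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		return initialized_size;
	}

A writer updating the sizes would take the same lock exclusively, i.e. bracket
the stores with write_lock_irqsave(&ni->size_lock, flags) and
write_unlock_irqrestore(&ni->size_lock, flags), so readers never observe the
two fields out of sync with each other.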