/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * cache.c
 */

/*
 * Blocks in Squashfs are compressed.  To avoid repeatedly decompressing
 * recently accessed data, Squashfs uses two small metadata and fragment
 * caches.
 *
 * This file implements a generic cache used for both caches, plus functions
 * layered on top of the generic cache to access the metadata and fragment
 * caches.
 *
 * To avoid out of memory and fragmentation issues with vmalloc, the cache
 * uses sequences of kmalloced PAGE_SIZE buffers.
 *
 * It should be noted that the cache is not used for file datablocks; these
 * are decompressed and cached in the page-cache in the normal way.  The
 * cache is only used to temporarily cache fragment and metadata blocks
 * which have been read as a result of a metadata (i.e. inode or
 * directory) or fragment access.  Because metadata and fragments are packed
 * together into blocks (to gain greater compression), the read of a
 * particular piece of metadata or fragment will retrieve other
 * metadata/fragments which have been packed with it; these, because of
 * locality of reference, may be read in the near future.  Temporarily
 * caching them ensures they are available for near-future access without
 * requiring an additional read and decompress.
 */
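
/*
 * Illustrative overview (a sketch, not part of the original file): the
 * generic cache defined here follows a simple lifecycle,
 *
 *	cache = squashfs_cache_init("metadata", entries, block_size);
 *	entry = squashfs_cache_get(sb, cache, block, length);
 *	...use entry->data or squashfs_copy_data()...
 *	squashfs_cache_put(entry);
 *	squashfs_cache_delete(cache);
 */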

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/pagemap.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "page_actor.h"

/*
 * Look up block in cache and increment its usage count.  If not in cache,
 * read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_cache_get(struct super_block *sb,
	struct squashfs_cache *cache, u64 block, int length)
{
	int i, n;
	struct squashfs_cache_entry *entry;

	spin_lock(&cache->lock);

	while (1) {
		for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
			if (cache->entry[i].block == block) {
				cache->curr_blk = i;
				break;
			}
			i = (i + 1) % cache->entries;
		}

		if (n == cache->entries) {
			/*
			 * Block not in cache; if all cache entries are used,
			 * go to sleep waiting for one to become available.
			 */
			if (cache->unused == 0) {
				cache->num_waiters++;
				spin_unlock(&cache->lock);
				wait_event(cache->wait_queue, cache->unused);
				spin_lock(&cache->lock);
				cache->num_waiters--;
				continue;
			}

			/*
			 * At least one unused cache entry.  A simple
			 * round-robin strategy is used to choose the entry to
			 * be evicted from the cache.
			 */
			i = cache->next_blk;
			for (n = 0; n < cache->entries; n++) {
				if (cache->entry[i].refcount == 0)
					break;
				i = (i + 1) % cache->entries;
			}

			cache->next_blk = (i + 1) % cache->entries;
			entry = &cache->entry[i];

			/*
			 * Initialise chosen cache entry, and fill it in from
			 * disk.
			 */
			cache->unused--;
			entry->block = block;
			entry->refcount = 1;
			entry->pending = 1;
			entry->num_waiters = 0;
			entry->error = 0;
			spin_unlock(&cache->lock);

			entry->length = squashfs_read_data(sb, block, length,
				&entry->next_index, entry->actor);

			spin_lock(&cache->lock);

			if (entry->length < 0)
				entry->error = entry->length;

			entry->pending = 0;

			/*
			 * While this entry was being filled, one or more
			 * other processes may have looked it up in the cache
			 * and slept waiting for it to become available.
			 */
			if (entry->num_waiters) {
				spin_unlock(&cache->lock);
				wake_up_all(&entry->wait_queue);
			} else
				spin_unlock(&cache->lock);

			goto out;
		}

		/*
		 * Block already in cache.  Increment refcount so it doesn't
		 * get reused until we're finished with it; if it was
		 * previously unused there's one less cache entry available
		 * for reuse.
		 */
		entry = &cache->entry[i];
		if (entry->refcount == 0)
			cache->unused--;
		entry->refcount++;

		/*
		 * If the entry is currently being filled in by another
		 * process, go to sleep waiting for it to become available.
		 */
		if (entry->pending) {
			entry->num_waiters++;
			spin_unlock(&cache->lock);
			wait_event(entry->wait_queue, !entry->pending);
		} else
			spin_unlock(&cache->lock);

		goto out;
	}

out:
	TRACE("Got %s %d, start block %lld, refcount %d, error %d\n",
		cache->name, i, entry->block, entry->refcount, entry->error);

	if (entry->error)
		ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
							block);
	return entry;
}

/*
 * Release cache entry; once its usage count reaches zero it can be reused.
 */
void squashfs_cache_put(struct squashfs_cache_entry *entry)
{
	struct squashfs_cache *cache = entry->cache;

	spin_lock(&cache->lock);
	entry->refcount--;
	if (entry->refcount == 0) {
		cache->unused++;
		/*
		 * If there are any processes waiting for a block to become
		 * available, wake one up.
		 */
		if (cache->num_waiters) {
			spin_unlock(&cache->lock);
			wake_up(&cache->wait_queue);
			return;
		}
	}
	spin_unlock(&cache->lock);
}
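
/*
 * Illustrative sketch (not part of the original file): callers pair
 * squashfs_cache_get() with squashfs_cache_put(), copying out any data
 * they need while the reference is held, e.g.
 *
 *	entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
 *	if (!entry->error)
 *		squashfs_copy_data(buffer, entry, offset, length);
 *	squashfs_cache_put(entry);
 *
 * squashfs_read_metadata() below follows exactly this pattern.
 */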

/*
 * Delete cache, reclaiming all kmalloced buffers.
 */
void squashfs_cache_delete(struct squashfs_cache *cache)
{
	int i, j;

	if (cache == NULL)
		return;

	for (i = 0; i < cache->entries; i++) {
		if (cache->entry[i].data) {
			for (j = 0; j < cache->pages; j++)
				kfree(cache->entry[i].data[j]);
			kfree(cache->entry[i].data);
		}
		kfree(cache->entry[i].actor);
	}

	kfree(cache->entry);
	kfree(cache);
}

/*
 * Initialise cache, allocating the specified number of entries, each of
 * size block_size.  To avoid vmalloc fragmentation issues, each entry
 * is allocated as a sequence of kmalloced PAGE_SIZE buffers.
 */
struct squashfs_cache *squashfs_cache_init(char *name, int entries,
	int block_size)
{
	int i, j;
	struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);

	if (cache == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		return NULL;
	}

	cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
	if (cache->entry == NULL) {
		ERROR("Failed to allocate %s cache\n", name);
		goto cleanup;
	}

	cache->curr_blk = 0;
	cache->next_blk = 0;
	cache->unused = entries;
	cache->entries = entries;
	cache->block_size = block_size;
	cache->pages = block_size >> PAGE_SHIFT;
	cache->pages = cache->pages ? cache->pages : 1;
	cache->name = name;
	cache->num_waiters = 0;
	spin_lock_init(&cache->lock);
	init_waitqueue_head(&cache->wait_queue);

	for (i = 0; i < entries; i++) {
		struct squashfs_cache_entry *entry = &cache->entry[i];

		init_waitqueue_head(&cache->entry[i].wait_queue);
		entry->cache = cache;
		entry->block = SQUASHFS_INVALID_BLK;
		entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
		if (entry->data == NULL) {
			ERROR("Failed to allocate %s cache entry\n", name);
			goto cleanup;
		}

		for (j = 0; j < cache->pages; j++) {
			entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL);
			if (entry->data[j] == NULL) {
				ERROR("Failed to allocate %s buffer\n", name);
				goto cleanup;
			}
		}

		entry->actor = squashfs_page_actor_init(entry->data,
						cache->pages, 0);
		if (entry->actor == NULL) {
			ERROR("Failed to allocate %s cache entry\n", name);
			goto cleanup;
		}
	}

	return cache;

cleanup:
	squashfs_cache_delete(cache);
	return NULL;
}
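
/*
 * For illustration (a sketch; the authoritative callers live in super.c):
 * the caches are created at mount time along the lines of
 *
 *	msblk->block_cache = squashfs_cache_init("metadata",
 *		SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE);
 */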

/*
 * Copy up to length bytes from the cache entry to the buffer, starting at
 * offset bytes into the cache entry.  If fewer than length bytes are
 * available, copy the number of bytes available.  In all cases return the
 * number of bytes copied.
 */
int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry,
		int offset, int length)
{
	int remaining = length;

	if (length == 0)
		return 0;
	else if (buffer == NULL)
		return min(length, entry->length - offset);

	while (offset < entry->length) {
		void *buff = entry->data[offset / PAGE_SIZE]
				+ (offset % PAGE_SIZE);
		int bytes = min_t(int, entry->length - offset,
				PAGE_SIZE - (offset % PAGE_SIZE));

		if (bytes >= remaining) {
			memcpy(buffer, buff, remaining);
			remaining = 0;
			break;
		}

		memcpy(buffer, buff, bytes);
		buffer += bytes;
		remaining -= bytes;
		offset += bytes;
	}

	return length - remaining;
}
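
/*
 * Worked example (assuming PAGE_SIZE == 4096): a copy starting at offset
 * 5000 begins in the second buffer, at entry->data[1] + 904, and can take
 * at most 4096 - 904 = 3192 bytes from that buffer before the loop above
 * advances to entry->data[2].
 */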

/*
 * Read length bytes from metadata position <block, offset> (block is the
 * start of the compressed block on disk, and offset is the offset into
 * the block once decompressed).  Data is packed into consecutive blocks,
 * and reading length bytes may require reading more than one block.
 */
int squashfs_read_metadata(struct super_block *sb, void *buffer,
		u64 *block, int *offset, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int bytes, res = length;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);

	if (unlikely(length < 0))
		return -EIO;

	while (length) {
		entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
		if (entry->error) {
			res = entry->error;
			goto error;
		} else if (*offset >= entry->length) {
			res = -EIO;
			goto error;
		}

		bytes = squashfs_copy_data(buffer, entry, *offset, length);
		if (buffer)
			buffer += bytes;
		length -= bytes;
		*offset += bytes;

		if (*offset == entry->length) {
			*block = entry->next_index;
			*offset = 0;
		}

		squashfs_cache_put(entry);
	}

	return res;

error:
	squashfs_cache_put(entry);
	return res;
}
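
/*
 * Illustrative sketch (not part of the original file): because *block and
 * *offset are advanced in place, consecutive structures can be read with
 * back-to-back calls.  Here header and payload are hypothetical buffers:
 *
 *	err = squashfs_read_metadata(sb, &header, &block, &offset,
 *			sizeof(header));
 *	if (err >= 0)
 *		err = squashfs_read_metadata(sb, payload, &block, &offset,
 *				payload_size);
 */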

/*
 * Look up in the fragment cache the fragment located at <start_block> in
 * the filesystem.  If necessary, read and decompress it from disk.
 */
struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *sb,
				u64 start_block, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	return squashfs_cache_get(sb, msblk->fragment_cache, start_block,
		length);
}

/*
 * Read and decompress the datablock located at <start_block> in the
 * filesystem.  The cache is used here to avoid duplicating locking and
 * read/decompress code.
 */
struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
				u64 start_block, int length)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;

	return squashfs_cache_get(sb, msblk->read_page, start_block, length);
}

/*
 * Read a filesystem table (an uncompressed sequence of bytes) from disk.
 */
void *squashfs_read_table(struct super_block *sb, u64 block, int length)
{
	int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int i, res;
	void *table, *buffer, **data;
	struct squashfs_page_actor *actor;

	table = buffer = kmalloc(length, GFP_KERNEL);
	if (table == NULL)
		return ERR_PTR(-ENOMEM);

	data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (data == NULL) {
		res = -ENOMEM;
		goto failed;
	}

	actor = squashfs_page_actor_init(data, pages, length);
	if (actor == NULL) {
		res = -ENOMEM;
		goto failed2;
	}

	for (i = 0; i < pages; i++, buffer += PAGE_SIZE)
		data[i] = buffer;

	res = squashfs_read_data(sb, block, length |
		SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, actor);

	kfree(data);
	kfree(actor);

	if (res < 0)
		goto failed;

	return table;

failed2:
	kfree(data);
failed:
	kfree(table);
	return ERR_PTR(res);
}
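
/*
 * Illustrative sketch (not part of the original file): the table readers
 * invoked at mount time use this helper along the lines of
 *
 *	table = squashfs_read_table(sb, table_start, length);
 *	if (IS_ERR(table))
 *		return table;
 */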