Fix squashfs failing to load sparse files
SquashFS supports sparse blocks in files - that is, if a given block is composed only of zeros, it's not written to the output file to save space and its on-disk length field is set to zero to indicate that. Previously the squashfs driver did not recognise this, and would attempt to read and decompress a zero-sized block, which obviously failed. The following command may be used to create a file for testing: cat <(dd if=/dev/urandom of=/dev/stdout bs=1M count=1) \ <(dd if=/dev/zero of=/dev/stdout bs=1M count=1) \ <(dd if=/dev/urandom of=/dev/stdout bs=1k count=200) >test_file Signed-off-by: Campbell Suter <campbell@snapit.group>
This commit is contained in:
parent
3b72612ad1
commit
9dba07f143
@ -1310,7 +1310,7 @@ int sqfs_read(const char *filename, void *buf, loff_t offset, loff_t len,
|
||||
{
|
||||
char *dir = NULL, *fragment_block, *datablock = NULL, *data_buffer = NULL;
|
||||
char *fragment = NULL, *file = NULL, *resolved, *data;
|
||||
u64 start, n_blks, table_size, data_offset, table_offset;
|
||||
u64 start, n_blks, table_size, data_offset, table_offset, sparse_size;
|
||||
int ret, j, i_number, datablk_count = 0;
|
||||
struct squashfs_super_block *sblk = ctxt.sblk;
|
||||
struct squashfs_fragment_block_entry frag_entry;
|
||||
@ -1444,28 +1444,43 @@ int sqfs_read(const char *filename, void *buf, loff_t offset, loff_t len,
|
||||
n_blks = DIV_ROUND_UP(table_size + table_offset,
|
||||
ctxt.cur_dev->blksz);
|
||||
|
||||
data_buffer = malloc_cache_aligned(n_blks * ctxt.cur_dev->blksz);
|
||||
/* Don't load any data for sparse blocks */
|
||||
if (finfo.blk_sizes[j] == 0) {
|
||||
n_blks = 0;
|
||||
table_offset = 0;
|
||||
data_buffer = NULL;
|
||||
data = NULL;
|
||||
} else {
|
||||
data_buffer = malloc_cache_aligned(n_blks * ctxt.cur_dev->blksz);
|
||||
|
||||
if (!data_buffer) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
if (!data_buffer) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = sqfs_disk_read(start, n_blks, data_buffer);
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* Possible causes: too many data blocks or too large
|
||||
* SquashFS block size. Tip: re-compile the SquashFS
|
||||
* image with mksquashfs's -b <block_size> option.
|
||||
*/
|
||||
printf("Error: too many data blocks to be read.\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
data = data_buffer + table_offset;
|
||||
}
|
||||
|
||||
ret = sqfs_disk_read(start, n_blks, data_buffer);
|
||||
if (ret < 0) {
|
||||
/*
|
||||
* Possible causes: too many data blocks or too large
|
||||
* SquashFS block size. Tip: re-compile the SquashFS
|
||||
* image with mksquashfs's -b <block_size> option.
|
||||
*/
|
||||
printf("Error: too many data blocks to be read.\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
data = data_buffer + table_offset;
|
||||
|
||||
/* Load the data */
|
||||
if (SQFS_COMPRESSED_BLOCK(finfo.blk_sizes[j])) {
|
||||
if (finfo.blk_sizes[j] == 0) {
|
||||
/* This is a sparse block */
|
||||
sparse_size = get_unaligned_le32(&sblk->block_size);
|
||||
if ((*actread + sparse_size) > len)
|
||||
sparse_size = len - *actread;
|
||||
memset(buf + *actread, 0, sparse_size);
|
||||
*actread += sparse_size;
|
||||
} else if (SQFS_COMPRESSED_BLOCK(finfo.blk_sizes[j])) {
|
||||
dest_len = get_unaligned_le32(&sblk->block_size);
|
||||
ret = sqfs_decompress(&ctxt, datablock, &dest_len,
|
||||
data, table_size);
|
||||
@ -1484,7 +1499,8 @@ int sqfs_read(const char *filename, void *buf, loff_t offset, loff_t len,
|
||||
}
|
||||
|
||||
data_offset += table_size;
|
||||
free(data_buffer);
|
||||
if (data_buffer)
|
||||
free(data_buffer);
|
||||
data_buffer = NULL;
|
||||
if (*actread >= len)
|
||||
break;
|
||||
|
Loading…
Reference in New Issue
Block a user