Merge tag 'fs-for_v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, udf, reiserfs, and quota updates from Jan Kara:

 - Fix for udf to make splicing work again

 - More disk format sanity checks for ext2 to avoid crashes found by
   syzbot

 - More quota disk format checks to avoid crashes found by fuzzing

 - Reiserfs & isofs cleanups

* tag 'fs-for_v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  quota: Add more checking after reading from quota file
  quota: Replace all block number checking with helper function
  quota: Check next/prev free block number after reading from quota file
  ext2: Use kvmalloc() for group descriptor array
  ext2: Add sanity checks for group and filesystem size
  udf: Support splicing to file
  isofs: delete unnecessary checks before brelse()
  fs/reiserfs: replace ternary operator with min() and min_t()
Merged by Linus Torvalds on 2022-10-07 08:42:37 -07:00 (commit 188943a156).
7 changed files with 86 additions and 30 deletions

fs/ext2/super.c

@@ -163,7 +163,7 @@ static void ext2_put_super (struct super_block * sb)
 	db_count = sbi->s_gdb_count;
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
+	kvfree(sbi->s_group_desc);
 	kfree(sbi->s_debts);
 	percpu_counter_destroy(&sbi->s_freeblocks_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
@@ -1052,6 +1052,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 			sbi->s_blocks_per_group);
 		goto failed_mount;
 	}
+	/* At least inode table, bitmaps, and sb have to fit in one group */
+	if (sbi->s_blocks_per_group <= sbi->s_itb_per_group + 3) {
+		ext2_msg(sb, KERN_ERR,
+			"error: #blocks per group smaller than metadata size: %lu <= %lu",
+			sbi->s_blocks_per_group, sbi->s_inodes_per_group + 3);
+		goto failed_mount;
+	}
 	if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
 		ext2_msg(sb, KERN_ERR,
 			"error: #fragments per group too big: %lu",
@@ -1065,9 +1072,14 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 			sbi->s_inodes_per_group);
 		goto failed_mount;
 	}
+	if (sb_bdev_nr_blocks(sb) < le32_to_cpu(es->s_blocks_count)) {
+		ext2_msg(sb, KERN_ERR,
+			"bad geometry: block count %u exceeds size of device (%u blocks)",
+			le32_to_cpu(es->s_blocks_count),
+			(unsigned)sb_bdev_nr_blocks(sb));
+		goto failed_mount;
+	}
 
-	if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
-		goto cantfind_ext2;
 	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
 				le32_to_cpu(es->s_first_data_block) - 1)
 					/ EXT2_BLOCKS_PER_GROUP(sb)) + 1;
@@ -1080,7 +1092,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
 		   EXT2_DESC_PER_BLOCK(sb);
-	sbi->s_group_desc = kmalloc_array(db_count,
+	sbi->s_group_desc = kvmalloc_array(db_count,
 					   sizeof(struct buffer_head *),
 					   GFP_KERNEL);
 	if (sbi->s_group_desc == NULL) {
@@ -1206,7 +1218,7 @@ failed_mount2:
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
 failed_mount_group_desc:
-	kfree(sbi->s_group_desc);
+	kvfree(sbi->s_group_desc);
 	kfree(sbi->s_debts);
 failed_mount:
 	brelse(bh);

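The kvmalloc_array()/kvfree() switch and the new geometry checks work together: the size of the group-descriptor array is derived from on-disk fields, so a crafted superblock could previously demand an absurdly large kmalloc(). For illustration only, a rough userspace sketch of the same arithmetic as ext2_fill_super() above, using hypothetical geometry values (this is not kernel code):

#include <stdio.h>

int main(void)
{
	/* Hypothetical geometry: 1 GiB filesystem with 1 KiB blocks. */
	unsigned long blocks_count = 1048576;      /* es->s_blocks_count */
	unsigned long first_data_block = 1;        /* es->s_first_data_block */
	unsigned long blocks_per_group = 8192;     /* sbi->s_blocks_per_group */
	unsigned long descs_per_block = 1024 / 32; /* EXT2_DESC_PER_BLOCK(sb) */

	/* Same formulas as in ext2_fill_super() above. */
	unsigned long groups_count =
		(blocks_count - first_data_block - 1) / blocks_per_group + 1;
	unsigned long db_count =
		(groups_count + descs_per_block - 1) / descs_per_block;

	printf("groups=%lu descriptor blocks=%lu\n", groups_count, db_count);
	return 0;
}

With s_blocks_count capped by the device size and s_blocks_per_group required to hold at least the group metadata, db_count stays bounded, and kvmalloc_array() lets a legitimately large array fall back to vmalloc() instead of requiring physically contiguous memory.
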
fs/isofs/inode.c

@@ -1277,13 +1277,11 @@ static int isofs_read_level3_size(struct inode *inode)
 	} while (more_entries);
 out:
 	kfree(tmpde);
-	if (bh)
-		brelse(bh);
+	brelse(bh);
 	return 0;
 
 out_nomem:
-	if (bh)
-		brelse(bh);
+	brelse(bh);
 	return -ENOMEM;
 
 out_noread:
@@ -1486,8 +1484,7 @@ static int isofs_read_inode(struct inode *inode, int relocated)
 	ret = 0;
 out:
 	kfree(tmpde);
-	if (bh)
-		brelse(bh);
+	brelse(bh);
 	return ret;
 
 out_badread:

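Both cleanups rely on brelse() itself tolerating a NULL buffer_head, much like free(NULL), so the error paths can release unconditionally. A minimal userspace sketch of the same contract, with made-up types (not kernel code):

#include <stdlib.h>

struct buffer { char *data; };

/* NULL-tolerant release, mirroring the brelse() contract. */
static void buffer_release(struct buffer *b)
{
	if (!b)
		return;
	free(b->data);
	free(b);
}

int main(void)
{
	struct buffer *b = NULL;	/* e.g. an error path where nothing was read */

	buffer_release(b);		/* safe without a caller-side NULL check */
	return 0;
}
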
fs/quota/quota_tree.c

@@ -71,6 +71,40 @@ static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
 	return ret;
 }
 
+static inline int do_check_range(struct super_block *sb, const char *val_name,
+				 uint val, uint min_val, uint max_val)
+{
+	if (val < min_val || val > max_val) {
+		quota_error(sb, "Getting %s %u out of range %u-%u",
+			    val_name, val, min_val, max_val);
+		return -EUCLEAN;
+	}
+
+	return 0;
+}
+
+static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
+				    struct qt_disk_dqdbheader *dh)
+{
+	int err = 0;
+
+	err = do_check_range(info->dqi_sb, "dqdh_next_free",
+			     le32_to_cpu(dh->dqdh_next_free), 0,
+			     info->dqi_blocks - 1);
+	if (err)
+		return err;
+	err = do_check_range(info->dqi_sb, "dqdh_prev_free",
+			     le32_to_cpu(dh->dqdh_prev_free), 0,
+			     info->dqi_blocks - 1);
+	if (err)
+		return err;
+	err = do_check_range(info->dqi_sb, "dqdh_entries",
+			     le16_to_cpu(dh->dqdh_entries), 0,
+			     qtree_dqstr_in_blk(info));
+
+	return err;
+}
+
 /* Remove empty block from list and return it */
 static int get_free_dqblk(struct qtree_mem_dqinfo *info)
 {
@@ -85,6 +119,9 @@ static int get_free_dqblk(struct qtree_mem_dqinfo *info)
 		ret = read_blk(info, blk, buf);
 		if (ret < 0)
 			goto out_buf;
+		ret = check_dquot_block_header(info, dh);
+		if (ret)
+			goto out_buf;
 		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
 	}
 	else {
@@ -232,6 +269,9 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
 		*err = read_blk(info, blk, buf);
 		if (*err < 0)
 			goto out_buf;
+		*err = check_dquot_block_header(info, dh);
+		if (*err)
+			goto out_buf;
 	} else {
 		blk = get_free_dqblk(info);
 		if ((int)blk < 0) {
@@ -313,6 +353,10 @@ static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 	}
 	ref = (__le32 *)buf;
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
+	ret = do_check_range(dquot->dq_sb, "block", newblk, 0,
+			     info->dqi_blocks - 1);
+	if (ret)
+		goto out_buf;
 	if (!newblk)
 		newson = 1;
 	if (depth == info->dqi_qtree_depth - 1) {
@@ -424,6 +468,9 @@ static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		goto out_buf;
 	}
 	dh = (struct qt_disk_dqdbheader *)buf;
+	ret = check_dquot_block_header(info, dh);
+	if (ret)
+		goto out_buf;
 	le16_add_cpu(&dh->dqdh_entries, -1);
 	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
 		ret = remove_free_dqentry(info, buf, blk);
@@ -480,12 +527,10 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
-	if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) {
-		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
-			    newblk, info->dqi_blocks);
-		ret = -EUCLEAN;
+	ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF,
+			     info->dqi_blocks - 1);
+	if (ret)
 		goto out_buf;
-	}
 
 	if (depth == info->dqi_qtree_depth - 1) {
 		ret = free_dqentry(info, dquot, newblk);
@@ -586,12 +631,10 @@ static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
 	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
 	if (!blk)	/* No reference? */
 		goto out_buf;
-	if (blk < QT_TREEOFF || blk >= info->dqi_blocks) {
-		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
-			    blk, info->dqi_blocks);
-		ret = -EUCLEAN;
+	ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF,
+			     info->dqi_blocks - 1);
+	if (ret)
 		goto out_buf;
-	}
 
 	if (depth < info->dqi_qtree_depth - 1)
 		ret = find_tree_dqentry(info, dquot, blk, depth+1);
@@ -705,15 +748,21 @@ static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
 		goto out_buf;
 	}
 	for (i = __get_index(info, *id, depth); i < epb; i++) {
-		if (ref[i] == cpu_to_le32(0)) {
+		uint blk_no = le32_to_cpu(ref[i]);
+
+		if (blk_no == 0) {
 			*id += level_inc;
 			continue;
 		}
+		ret = do_check_range(info->dqi_sb, "block", blk_no, 0,
+				     info->dqi_blocks - 1);
+		if (ret)
+			goto out_buf;
 		if (depth == info->dqi_qtree_depth - 1) {
 			ret = 0;
 			goto out_buf;
 		}
-		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
+		ret = find_next_id(info, id, blk_no, depth + 1);
 		if (ret != -ENOENT)
 			break;
 	}

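All of the open-coded bounds checks now funnel through do_check_range(), which names the offending field and returns -EUCLEAN so that a fuzzed quota file is rejected instead of the code chasing a wild block number. A rough userspace sketch of the same pattern, with hypothetical values (EUCLEAN is exposed by <errno.h> on Linux; this is not the kernel implementation):

#include <errno.h>
#include <stdio.h>

static int do_check_range(const char *val_name, unsigned int val,
			  unsigned int min_val, unsigned int max_val)
{
	if (val < min_val || val > max_val) {
		fprintf(stderr, "Getting %s %u out of range %u-%u\n",
			val_name, val, min_val, max_val);
		return -EUCLEAN;	/* "structure needs cleaning" */
	}
	return 0;
}

int main(void)
{
	unsigned int dqi_blocks = 64;	/* blocks in the quota file */
	unsigned int next_free = 200;	/* bogus value from a corrupted image */

	/* Same shape as the dqdh_next_free check in check_dquot_block_header(). */
	return do_check_range("dqdh_next_free", next_free, 0, dqi_blocks - 1) ? 1 : 0;
}
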
fs/reiserfs/prints.c

@@ -456,7 +456,7 @@ static int print_internal(struct buffer_head *bh, int first, int last)
 		to = B_NR_ITEMS(bh);
 	} else {
 		from = first;
-		to = last < B_NR_ITEMS(bh) ? last : B_NR_ITEMS(bh);
+		to = min_t(int, last, B_NR_ITEMS(bh));
 	}
 
 	reiserfs_printk("INTERNAL NODE (%ld) contains %z\n", bh->b_blocknr, bh);

fs/reiserfs/resize.c

@@ -97,7 +97,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
 		 * using the copy_size var below allows this code to work for
 		 * both shrinking and expanding the FS.
 		 */
-		copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr;
+		copy_size = min(bmap_nr_new, bmap_nr);
 		copy_size =
 		    copy_size * sizeof(struct reiserfs_list_bitmap_node *);
 		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {

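The two reiserfs cleanups above differ only in which helper fits: the kernel's min() insists that both operands have the same type, while min_t(type, a, b) casts both to the named type first, which is presumably why print_internal() uses min_t(int, ...) and reiserfs_resize() can use plain min(). A simplified userspace illustration (this is not the kernel macro, which adds compile-time type checking and avoids double evaluation):

#include <stdio.h>

/* Simplified stand-in for min_t(): force one explicit type, then compare.
 * Unlike the kernel version, this evaluates its arguments twice. */
#define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	int last = 20;
	unsigned short nr_items = 7;	/* e.g. a value read from a __le16 field */

	printf("to = %d\n", MIN_T(int, last, nr_items));
	return 0;
}
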
fs/reiserfs/super.c

@@ -2504,9 +2504,7 @@ static ssize_t reiserfs_quota_read(struct super_block *sb, int type, char *data,
 		len = i_size - off;
 	toread = len;
 	while (toread > 0) {
-		tocopy =
-		    sb->s_blocksize - offset <
-		    toread ? sb->s_blocksize - offset : toread;
+		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
 		tmp_bh.b_state = 0;
 		/*
 		 * Quota files are without tails so we can safely
@@ -2554,8 +2552,7 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
 		return -EIO;
 	}
 	while (towrite > 0) {
-		tocopy = sb->s_blocksize - offset < towrite ?
-			sb->s_blocksize - offset : towrite;
+		tocopy = min_t(unsigned long, sb->s_blocksize - offset, towrite);
 		tmp_bh.b_state = 0;
 		reiserfs_write_lock(sb);
 		err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);

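In both quota I/O loops the replaced expression clamps each pass to whatever remains of the current block, so only the first iteration is shortened by the starting offset. A small userspace sketch of that loop shape with hypothetical sizes (the kernel now spells the clamp as min_t(unsigned long, ...)):

#include <stdio.h>

int main(void)
{
	unsigned long blocksize = 4096;
	unsigned long offset = 1000;	/* offset within the first block */
	unsigned long toread = 10000;	/* total bytes requested */

	while (toread > 0) {
		unsigned long tocopy = blocksize - offset < toread ?
				       blocksize - offset : toread;

		printf("copy %lu bytes\n", tocopy);
		toread -= tocopy;
		offset = 0;	/* later passes start at the block boundary */
	}
	return 0;
}
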
fs/udf/file.c

@@ -252,6 +252,7 @@ const struct file_operations udf_file_operations = {
 	.release	= udf_release_file,
 	.fsync		= generic_file_fsync,
 	.splice_read	= generic_file_splice_read,
+	.splice_write	= iter_file_splice_write,
 	.llseek		= generic_file_llseek,
 };
 
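With .splice_write pointing at iter_file_splice_write, splice() from a pipe into a file on a UDF mount works again instead of erroring out. A small userspace demonstration; the mount point and file name below are hypothetical:

/* Build with: cc -o splice_demo splice_demo.c */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	/* "/mnt/udf/out.txt" stands in for any file on a UDF mount. */
	int out = open("/mnt/udf/out.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	const char msg[] = "spliced to UDF\n";

	if (out < 0 || pipe(pipefd) < 0) {
		perror("setup");
		return 1;
	}
	if (write(pipefd[1], msg, strlen(msg)) < 0) {
		perror("write");
		return 1;
	}
	/* Moves the data from the pipe into the file without a userspace copy. */
	if (splice(pipefd[0], NULL, out, NULL, strlen(msg), 0) < 0) {
		perror("splice");
		return 1;
	}
	close(pipefd[0]);
	close(pipefd[1]);
	close(out);
	return 0;
}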