/*
* linux/fs/hfs/inode.c
*
* Copyright (C) 1995-1997 Paul H. Hargrove
* (C) 2003 Ardis Technologies <roman@ardistech.com>
* This file may be distributed under the terms of the GNU General Public License.
*
* This file contains inode-related functions which do not depend on
* which scheme is being used to represent forks.
*
* Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
*/
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/uio.h>
#include <linux/xattr.h>
#include <linux/blkdev.h>
#include "hfs_fs.h"
#include "btree.h"
static const struct file_operations hfs_file_operations;
static const struct inode_operations hfs_file_inode_operations;
/*================ Variable-like macros ================*/
#define HFS_VALID_MODE_BITS (S_IFREG | S_IFDIR | S_IRWXUGO)
static int hfs_read_folio(struct file *file, struct folio *folio)
{
return block_read_full_folio(folio, hfs_get_block);
}
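/*
 * Called after a buffered or direct write fails part-way: drop any page
 * cache beyond the old i_size and give back the blocks that were
 * allocated past it.
 */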
static void hfs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
if (to > inode->i_size) {
truncate_pagecache(inode, inode->i_size);
hfs_file_truncate(inode);
}
}
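/*
 * cont_write_begin() zero-fills the gap between the current end of data
 * (tracked in phys_size) and the write position; on failure, any
 * speculative extension is undone via hfs_write_failed().
 */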
int hfs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
{
int ret;
ret = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
hfs_get_block,
&HFS_I(mapping->host)->phys_size);
if (unlikely(ret))
hfs_write_failed(mapping, pos + len);
return ret;
}
static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping, block, hfs_get_block);
}
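/*
 * Only the extents overflow and catalog B-tree inodes use this address
 * space. A folio may be released only if no cached b-tree node backed
 * by it still holds a reference; unreferenced nodes found in the hash
 * are freed here so their pages can go away.
 */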
static bool hfs_release_folio(struct folio *folio, gfp_t mask)
{
struct inode *inode = folio->mapping->host;
struct super_block *sb = inode->i_sb;
struct hfs_btree *tree;
struct hfs_bnode *node;
u32 nidx;
int i;
bool res = true;
switch (inode->i_ino) {
case HFS_EXT_CNID:
tree = HFS_SB(sb)->ext_tree;
break;
case HFS_CAT_CNID:
tree = HFS_SB(sb)->cat_tree;
break;
default:
BUG();
return false;
}
if (!tree)
return false;
if (tree->node_size >= PAGE_SIZE) {
nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
spin_lock(&tree->hash_lock);
node = hfs_bnode_findhash(tree, nidx);
if (!node)
;
else if (atomic_read(&node->refcnt))
res = false;
if (res && node) {
hfs_bnode_unhash(node);
hfs_bnode_free(node);
}
spin_unlock(&tree->hash_lock);
} else {
nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
i = 1 << (PAGE_SHIFT - tree->node_size_shift);
spin_lock(&tree->hash_lock);
do {
node = hfs_bnode_findhash(tree, nidx++);
if (!node)
continue;
if (atomic_read(&node->refcnt)) {
res = false;
break;
}
hfs_bnode_unhash(node);
hfs_bnode_free(node);
} while (--i && nidx < tree->node_count);
spin_unlock(&tree->hash_lock);
}
return res ? try_to_free_buffers(folio) : false;
}
static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
ret = blockdev_direct_IO(iocb, inode, iter, hfs_get_block);
/*
* In case of error, an extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = iocb->ki_pos + count;
if (end > isize)
hfs_write_failed(mapping, end);
}
return ret;
}
static int hfs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, hfs_get_block);
}
const struct address_space_operations hfs_btree_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = hfs_read_folio,
.writepages = hfs_writepages,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.migrate_folio = buffer_migrate_folio,
.bmap = hfs_bmap,
.release_folio = hfs_release_folio,
};
const struct address_space_operations hfs_aops = {
.dirty_folio = block_dirty_folio,
.invalidate_folio = block_invalidate_folio,
.read_folio = hfs_read_folio,
.write_begin = hfs_write_begin,
.write_end = generic_write_end,
.bmap = hfs_bmap,
.direct_IO = hfs_direct_IO,
.writepages = hfs_writepages,
.migrate_folio = buffer_migrate_folio,
};
/*
 * hfs_new_inode
 *
 * Allocate and initialize an in-core inode for a new file or directory
 * created under 'dir', assigning it the next free catalog node ID (CNID).
 */
struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = new_inode(sb);
if (!inode)
return NULL;
mutex_init(&HFS_I(inode)->extents_lock);
INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
spin_lock_init(&HFS_I(inode)->open_dir_lock);
hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
inode->i_ino = HFS_SB(sb)->next_id++;
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
set_nlink(inode, 1);
simple_inode_init_ts(inode);
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
HFS_I(inode)->fs_blocks = 0;
HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
if (S_ISDIR(mode)) {
inode->i_size = 2;
HFS_SB(sb)->folder_count++;
if (dir->i_ino == HFS_ROOT_CNID)
HFS_SB(sb)->root_dirs++;
inode->i_op = &hfs_dir_inode_operations;
inode->i_fop = &hfs_dir_operations;
inode->i_mode |= S_IRWXUGO;
inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
} else if (S_ISREG(mode)) {
HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
HFS_SB(sb)->file_count++;
if (dir->i_ino == HFS_ROOT_CNID)
HFS_SB(sb)->root_files++;
inode->i_op = &hfs_file_inode_operations;
inode->i_fop = &hfs_file_operations;
inode->i_mapping->a_ops = &hfs_aops;
inode->i_mode |= S_IRUGO|S_IXUGO;
if (mode & S_IWUSR)
inode->i_mode |= S_IWUGO;
inode->i_mode &= ~HFS_SB(inode->i_sb)->s_file_umask;
HFS_I(inode)->phys_size = 0;
HFS_I(inode)->alloc_blocks = 0;
HFS_I(inode)->first_blocks = 0;
HFS_I(inode)->cached_start = 0;
HFS_I(inode)->cached_blocks = 0;
memset(HFS_I(inode)->first_extents, 0, sizeof(hfs_extent_rec));
memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
}
insert_inode_hash(inode);
mark_inode_dirty(inode);
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
hfs_mark_mdb_dirty(sb);
return inode;
}
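/*
 * Adjust the folder/file counters kept in the MDB when a catalog entry
 * disappears, and free the data fork of an unlinked regular file.
 */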
void hfs_delete_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
hfs_dbg(INODE, "delete_inode: %lu\n", inode->i_ino);
if (S_ISDIR(inode->i_mode)) {
HFS_SB(sb)->folder_count--;
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_dirs--;
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
hfs_mark_mdb_dirty(sb);
return;
}
HFS_SB(sb)->file_count--;
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_files--;
if (S_ISREG(inode->i_mode)) {
if (!inode->i_nlink) {
inode->i_size = 0;
hfs_file_truncate(inode);
}
}
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
hfs_mark_mdb_dirty(sb);
}
void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
__be32 __log_size, __be32 phys_size, u32 clump_size)
{
struct super_block *sb = inode->i_sb;
u32 log_size = be32_to_cpu(__log_size);
u16 count;
int i;
memcpy(HFS_I(inode)->first_extents, ext, sizeof(hfs_extent_rec));
for (count = 0, i = 0; i < 3; i++)
count += be16_to_cpu(ext[i].count);
HFS_I(inode)->first_blocks = count;
HFS_I(inode)->cached_start = 0;
HFS_I(inode)->cached_blocks = 0;
inode->i_size = HFS_I(inode)->phys_size = log_size;
HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
HFS_I(inode)->alloc_blocks = be32_to_cpu(phys_size) /
HFS_SB(sb)->alloc_blksz;
HFS_I(inode)->clump_blocks = clump_size / HFS_SB(sb)->alloc_blksz;
if (!HFS_I(inode)->clump_blocks)
HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
}
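/*
 * Carries the catalog key and record from hfs_iget() down to the
 * hfs_test_inode()/hfs_read_inode() callbacks of iget5_locked().
 */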
struct hfs_iget_data {
struct hfs_cat_key *key;
hfs_cat_rec *rec;
};
static int hfs_test_inode(struct inode *inode, void *data)
{
struct hfs_iget_data *idata = data;
hfs_cat_rec *rec;
rec = idata->rec;
switch (rec->type) {
case HFS_CDR_DIR:
return inode->i_ino == be32_to_cpu(rec->dir.DirID);
case HFS_CDR_FIL:
return inode->i_ino == be32_to_cpu(rec->file.FlNum);
default:
BUG();
return 1;
}
}
/*
 * hfs_read_inode
 *
 * Fill in an in-core inode from a catalog record (file or directory);
 * used as the init callback for iget5_locked().
 */
static int hfs_read_inode(struct inode *inode, void *data)
{
struct hfs_iget_data *idata = data;
struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
hfs_cat_rec *rec;
HFS_I(inode)->flags = 0;
HFS_I(inode)->rsrc_inode = NULL;
mutex_init(&HFS_I(inode)->extents_lock);
INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
spin_lock_init(&HFS_I(inode)->open_dir_lock);
/* Initialize the inode */
inode->i_uid = hsb->s_uid;
inode->i_gid = hsb->s_gid;
set_nlink(inode, 1);
if (idata->key)
HFS_I(inode)->cat_key = *idata->key;
else
HFS_I(inode)->flags |= HFS_FLG_RSRC;
HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;
rec = idata->rec;
switch (rec->type) {
case HFS_CDR_FIL:
if (!HFS_IS_RSRC(inode)) {
hfs_inode_read_fork(inode, rec->file.ExtRec, rec->file.LgLen,
rec->file.PyLen, be16_to_cpu(rec->file.ClpSize));
} else {
hfs_inode_read_fork(inode, rec->file.RExtRec, rec->file.RLgLen,
rec->file.RPyLen, be16_to_cpu(rec->file.ClpSize));
}
inode->i_ino = be32_to_cpu(rec->file.FlNum);
inode->i_mode = S_IRUGO | S_IXUGO;
if (!(rec->file.Flags & HFS_FIL_LOCK))
inode->i_mode |= S_IWUGO;
inode->i_mode &= ~hsb->s_file_umask;
inode->i_mode |= S_IFREG;
inode_set_mtime_to_ts(inode,
inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->file.MdDat))));
inode->i_op = &hfs_file_inode_operations;
inode->i_fop = &hfs_file_operations;
inode->i_mapping->a_ops = &hfs_aops;
break;
case HFS_CDR_DIR:
inode->i_ino = be32_to_cpu(rec->dir.DirID);
inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
HFS_I(inode)->fs_blocks = 0;
inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
inode_set_mtime_to_ts(inode,
inode_set_atime_to_ts(inode, inode_set_ctime_to_ts(inode, hfs_m_to_utime(rec->dir.MdDat))));
inode->i_op = &hfs_dir_inode_operations;
inode->i_fop = &hfs_dir_operations;
break;
default:
make_bad_inode(inode);
}
return 0;
}
/*
 * hfs_iget()
 *
 * Given the superblock of an HFS filesystem, a catalog 'key' and the
 * matching catalog record, return the in-core inode for that file or
 * directory, or NULL if the record describes neither a file nor a
 * directory.
 */
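/*
 * Typical use is from a directory lookup, roughly (a sketch modelled on
 * hfs_lookup() in dir.c, error handling trimmed):
 *
 *	struct inode *inode = NULL;
 *
 *	res = hfs_brec_read(&fd, &rec, sizeof(rec));
 *	if (!res)
 *		inode = hfs_iget(dir->i_sb, &fd.search_key->cat, &rec);
 *	hfs_find_exit(&fd);
 *	return d_splice_alias(inode, dentry);
 */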
struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_rec *rec)
{
struct hfs_iget_data data = { key, rec };
struct inode *inode;
u32 cnid;
switch (rec->type) {
case HFS_CDR_DIR:
cnid = be32_to_cpu(rec->dir.DirID);
break;
case HFS_CDR_FIL:
cnid = be32_to_cpu(rec->file.FlNum);
break;
default:
return NULL;
}
inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
if (inode && (inode->i_state & I_NEW))
unlock_new_inode(inode);
return inode;
}
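/*
 * Copy the in-core extent record and the logical/physical fork sizes
 * back into the on-disk fields of a catalog record.
 */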
void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
__be32 *log_size, __be32 *phys_size)
{
memcpy(ext, HFS_I(inode)->first_extents, sizeof(hfs_extent_rec));
if (log_size)
*log_size = cpu_to_be32(inode->i_size);
if (phys_size)
*phys_size = cpu_to_be32(HFS_I(inode)->alloc_blocks *
HFS_SB(inode->i_sb)->alloc_blksz);
}
int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct inode *main_inode = inode;
struct hfs_find_data fd;
hfs_cat_rec rec;
int res;
hfs_dbg(INODE, "hfs_write_inode: %lu\n", inode->i_ino);
res = hfs_ext_write_extent(inode);
if (res)
return res;
if (inode->i_ino < HFS_FIRSTUSER_CNID) {
switch (inode->i_ino) {
case HFS_ROOT_CNID:
break;
case HFS_EXT_CNID:
hfs_btree_write(HFS_SB(inode->i_sb)->ext_tree);
return 0;
case HFS_CAT_CNID:
hfs_btree_write(HFS_SB(inode->i_sb)->cat_tree);
return 0;
default:
BUG();
return -EIO;
}
}
if (HFS_IS_RSRC(inode))
main_inode = HFS_I(inode)->rsrc_inode;
if (!main_inode->i_nlink)
return 0;
if (hfs_find_init(HFS_SB(main_inode->i_sb)->cat_tree, &fd))
/* panic? */
return -EIO;
res = -EIO;
if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN)
goto out;
fd.search_key->cat = HFS_I(main_inode)->cat_key;
if (hfs_brec_find(&fd))
goto out;
if (S_ISDIR(main_inode->i_mode)) {
if (fd.entrylength < sizeof(struct hfs_cat_dir))
goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_dir));
if (rec.type != HFS_CDR_DIR ||
be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
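			/* stale or mismatched catalog record; the inconsistency is ignored */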
}
rec.dir.MdDat = hfs_u_to_mtime(inode_get_mtime(inode));
rec.dir.Val = cpu_to_be16(inode->i_size - 2);
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_dir));
} else if (HFS_IS_RSRC(inode)) {
if (fd.entrylength < sizeof(struct hfs_cat_file))
goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
hfs_inode_write_fork(inode, rec.file.RExtRec,
&rec.file.RLgLen, &rec.file.RPyLen);
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
} else {
if (fd.entrylength < sizeof(struct hfs_cat_file))
goto out;
hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
if (rec.type != HFS_CDR_FIL ||
be32_to_cpu(rec.file.FlNum) != inode->i_ino) {
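			/* stale or mismatched catalog record; the inconsistency is ignored */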
}
if (inode->i_mode & S_IWUSR)
rec.file.Flags &= ~HFS_FIL_LOCK;
else
rec.file.Flags |= HFS_FIL_LOCK;
hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
rec.file.MdDat = hfs_u_to_mtime(inode_get_mtime(inode));
hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
sizeof(struct hfs_cat_file));
}
res = 0;
out:
hfs_find_exit(&fd);
return res;
}
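/*
 * Lookup of the magic "rsrc" name under a regular file: build (or reuse)
 * a second in-core inode that maps the file's resource fork.
 */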
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct inode *inode = NULL;
hfs_cat_rec rec;
struct hfs_find_data fd;
int res;
if (HFS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
goto out;
inode = HFS_I(dir)->rsrc_inode;
if (inode)
goto out;
inode = new_inode(dir->i_sb);
if (!inode)
return ERR_PTR(-ENOMEM);
res = hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
if (res) {
iput(inode);
return ERR_PTR(res);
}
fd.search_key->cat = HFS_I(dir)->cat_key;
res = hfs_brec_read(&fd, &rec, sizeof(rec));
if (!res) {
struct hfs_iget_data idata = { NULL, &rec };
hfs_read_inode(inode, &idata);
}
hfs_find_exit(&fd);
if (res) {
iput(inode);
return ERR_PTR(res);
}
HFS_I(inode)->rsrc_inode = dir;
HFS_I(dir)->rsrc_inode = inode;
igrab(dir);
inode_fake_hash(inode);
mark_inode_dirty(inode);
dont_mount(dentry);
out:
return d_splice_alias(inode, dentry);
}
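/*
 * Final teardown of an in-core inode: drop its page cache and, for a
 * resource-fork inode, release the reference it holds on the data-fork
 * inode.
 */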
void hfs_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
iput(HFS_I(inode)->rsrc_inode);
}
}
static int hfs_file_open(struct inode *inode, struct file *file)
{
if (HFS_IS_RSRC(inode))
inode = HFS_I(inode)->rsrc_inode;
atomic_inc(&HFS_I(inode)->opencnt);
return 0;
}
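/*
 * On the last close of a file, truncate away any clump blocks that were
 * preallocated beyond the current i_size.
 */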
static int hfs_file_release(struct inode *inode, struct file *file)
{
//struct super_block *sb = inode->i_sb;
if (HFS_IS_RSRC(inode))
inode = HFS_I(inode)->rsrc_inode;
if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
inode_lock(inode);
hfs_file_truncate(inode);
//if (inode->i_flags & S_DEAD) {
// hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
// hfs_delete_inode(inode);
//}
inode_unlock(inode);
}
return 0;
}
/*
 * hfs_inode_setattr()
 *
 * Based very closely on fs/msdos/inode.c by Werner Almesberger
 *
 * This is the setattr() method in the inode_operations structure for
 * HFS file systems. Its purpose is to take the changes made to an
 * inode and apply them in a filesystem-dependent manner. In this case
 * the process has a few tasks to do:
 * 1) prevent changes to the i_uid and i_gid fields.
 * 2) map file permissions to the closest allowable permissions.
 * 3) Since multiple Linux files can share the same on-disk inode under
 *    HFS (for instance the data and resource forks of a file), a change
 *    to permissions must be applied to all other in-core inodes which
 *    correspond to the same HFS file.
 */
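/*
 * A worked example of the mode mapping (assuming s_file_umask is 0022
 * and the file is currently 0444): chmod u+w expands the owner's write
 * bit to S_IWUGO and then applies the umask, giving 0644; chmod a-w
 * clears every write bit, which also sets HFS_FIL_LOCK the next time
 * the inode is written back.
 */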
int hfs_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
int error;
error = setattr_prepare(&nop_mnt_idmap, dentry,
attr); /* basic permission checks */
if (error)
return error;
/* no uid/gid changes and limit which mode bits can be set */
if (((attr->ia_valid & ATTR_UID) &&
(!uid_eq(attr->ia_uid, hsb->s_uid))) ||
((attr->ia_valid & ATTR_GID) &&
(!gid_eq(attr->ia_gid, hsb->s_gid))) ||
((attr->ia_valid & ATTR_MODE) &&
((S_ISDIR(inode->i_mode) &&
(attr->ia_mode != inode->i_mode)) ||
(attr->ia_mode & ~HFS_VALID_MODE_BITS)))) {
return hsb->s_quiet ? 0 : error;
}
if (attr->ia_valid & ATTR_MODE) {
/* Only the 'w' bits can ever change and only all together. */
if (attr->ia_mode & S_IWUSR)
attr->ia_mode = inode->i_mode | S_IWUGO;
else
attr->ia_mode = inode->i_mode & ~S_IWUGO;
attr->ia_mode &= S_ISDIR(inode->i_mode) ? ~hsb->s_dir_umask: ~hsb->s_file_umask;
}
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
inode_dio_wait(inode);
error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
truncate_setsize(inode, attr->ia_size);
hfs_file_truncate(inode);
simple_inode_init_ts(inode);
}
setattr_copy(&nop_mnt_idmap, inode, attr);
mark_inode_dirty(inode);
return 0;
}
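/*
 * fsync: write the dirty inode and any pending MDB update, then flush
 * the block device so the metadata actually reaches stable storage.
 */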
static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = filp->f_mapping->host;
struct super_block * sb;
int ret, err;
ret = file_write_and_wait_range(filp, start, end);
if (ret)
return ret;
inode_lock(inode);
/* sync the inode to buffers */
ret = write_inode_now(inode, 0);
/* sync the superblock to buffers */
sb = inode->i_sb;
flush_delayed_work(&HFS_SB(sb)->mdb_work);
/* .. finally sync the buffers to disk */
err = sync_blockdev(sb->s_bdev);
if (!ret)
ret = err;
inode_unlock(inode);
return ret;
}
static const struct file_operations hfs_file_operations = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap = generic_file_mmap,
.splice_read = filemap_splice_read,
.fsync = hfs_file_fsync,
.open = hfs_file_open,
.release = hfs_file_release,
};
static const struct inode_operations hfs_file_inode_operations = {
.lookup = hfs_file_lookup,
.setattr = hfs_inode_setattr,
.listxattr = generic_listxattr,
};