// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Copyright (C) 1996 Gertjan van Wingerde
 *	Minix V2 fs support.
 *
 * Modified for 680x0 by Andreas Schwab
 * Updated to filesystem version 3 by Daniel Aragones
 */

#include <linux/module.h>
#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/mpage.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/fs_context.h>

static int minix_write_inode(struct inode *inode,
		struct writeback_control *wbc);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);

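/*
 * Called when the VFS evicts an in-core inode.  If the link count has
 * dropped to zero the file data is truncated away and the on-disk inode
 * is released as well; otherwise only the in-core state is torn down.
 */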
static void minix_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (!inode->i_nlink) {
		inode->i_size = 0;
		minix_truncate(inode);
	}
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	if (!inode->i_nlink)
		minix_free_inode(inode);
}

static void minix_put_super(struct super_block *sb)
{
	int i;
	struct minix_sb_info *sbi = minix_sb(sb);

	if (!sb_rdonly(sb)) {
		if (sbi->s_version != MINIX_V3)	 /* s_state is now out from V3 sb */
			sbi->s_ms->s_state = sbi->s_mount_state;
		mark_buffer_dirty(sbi->s_sbh);
	}
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	brelse (sbi->s_sbh);
	kfree(sbi->s_imap);
	sb->s_fs_info = NULL;
	kfree(sbi);
}

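/*
 * In-core inode cache: minix_inode_info objects (each embedding the VFS
 * inode) come from a dedicated slab.  init_once() runs only when a new
 * slab object is first constructed, not on every allocation.
 */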
static struct kmem_cache * minix_inode_cachep;

static struct inode *minix_alloc_inode(struct super_block *sb)
{
	struct minix_inode_info *ei;
	ei = alloc_inode_sb(sb, minix_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void minix_free_in_core_inode(struct inode *inode)
{
	kmem_cache_free(minix_inode_cachep, minix_i(inode));
}

static void init_once(void *foo)
{
	struct minix_inode_info *ei = (struct minix_inode_info *) foo;

	inode_init_once(&ei->vfs_inode);
}

static int __init init_inodecache(void)
{
	minix_inode_cachep = kmem_cache_create("minix_inode_cache",
					       sizeof(struct minix_inode_info),
					       0, (SLAB_RECLAIM_ACCOUNT|
						   SLAB_ACCOUNT),
					       init_once);
	if (minix_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(minix_inode_cachep);
}

static const struct super_operations minix_sops = {
	.alloc_inode	= minix_alloc_inode,
	.free_inode	= minix_free_in_core_inode,
	.write_inode	= minix_write_inode,
	.evict_inode	= minix_evict_inode,
	.put_super	= minix_put_super,
	.statfs		= minix_statfs,
};

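/*
 * Remount handling for the fs_context mount API.  For V1/V2 superblocks
 * the on-disk s_state field tracks whether the filesystem was cleanly
 * unmounted: MINIX_VALID_FS is cleared while mounted read-write and
 * restored when going back to read-only.  V3 superblocks have no s_state
 * (see the "s_state is now out from V3 sb" comments), so only the in-core
 * mount state is updated there.
 */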
static int minix_reconfigure(struct fs_context *fc)
{
	struct minix_super_block * ms;
	struct super_block *sb = fc->root->d_sb;
	struct minix_sb_info * sbi = sb->s_fs_info;

	sync_filesystem(sb);
	ms = sbi->s_ms;
	if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (fc->sb_flags & SB_RDONLY) {
		if (ms->s_state & MINIX_VALID_FS ||
		    !(sbi->s_mount_state & MINIX_VALID_FS))
			return 0;
		/* Mounting a rw partition read-only. */
		if (sbi->s_version != MINIX_V3)
			ms->s_state = sbi->s_mount_state;
		mark_buffer_dirty(sbi->s_sbh);
	} else {
		/* Mount a partition which is read-only, read-write. */
		if (sbi->s_version != MINIX_V3) {
			sbi->s_mount_state = ms->s_state;
			ms->s_state &= ~MINIX_VALID_FS;
		} else {
			sbi->s_mount_state = MINIX_VALID_FS;
		}
		mark_buffer_dirty(sbi->s_sbh);

		if (!(sbi->s_mount_state & MINIX_VALID_FS))
			printk("MINIX-fs warning: remounting unchecked fs, "
				"running fsck is recommended\n");
		else if ((sbi->s_mount_state & MINIX_ERROR_FS))
			printk("MINIX-fs warning: remounting fs with errors, "
				"running fsck is recommended\n");
	}
	return 0;
}

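/*
 * Sanity checks applied before the bitmaps are read.  The V1 limit below
 * follows from the V1 block map: 7 direct zones plus one indirect and one
 * double-indirect block of 512 16-bit zone pointers each, i.e.
 * (7 + 512 + 512*512) * 1024 bytes, roughly 256 MiB.
 */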
static bool minix_check_superblock(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);

	if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
		return false;

	/*
	 * s_max_size must not exceed the block mapping limitation. This check
	 * is only needed for V1 filesystems, since V2/V3 support an extra level
	 * of indirect blocks which places the limit well above U32_MAX.
	 */
	if (sbi->s_version == MINIX_V1 &&
	    sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
		return false;

	return true;
}

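/*
 * Read the superblock and set everything up for mounting.  The classic
 * Minix layout puts the boot block at block 0 and the superblock at
 * block 1 (hence sb_bread(s, 1)), followed by the inode and zone bitmaps
 * starting at block 2, the inode table, and finally the data zones.
 */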
static int minix_fill_super(struct super_block *s, struct fs_context *fc)
{
	struct buffer_head *bh;
	struct buffer_head **map;
	struct minix_super_block *ms;
	struct minix3_super_block *m3s = NULL;
	unsigned long i, block;
	struct inode *root_inode;
	struct minix_sb_info *sbi;
	int ret = -EINVAL;
	int silent = fc->sb_flags & SB_SILENT;

	sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	s->s_fs_info = sbi;

	BUILD_BUG_ON(32 != sizeof (struct minix_inode));
	BUILD_BUG_ON(64 != sizeof(struct minix2_inode));

	if (!sb_set_blocksize(s, BLOCK_SIZE))
		goto out_bad_hblock;

	if (!(bh = sb_bread(s, 1)))
		goto out_bad_sb;

	ms = (struct minix_super_block *) bh->b_data;
	sbi->s_ms = ms;
	sbi->s_sbh = bh;
	sbi->s_mount_state = ms->s_state;
	sbi->s_ninodes = ms->s_ninodes;
	sbi->s_nzones = ms->s_nzones;
	sbi->s_imap_blocks = ms->s_imap_blocks;
	sbi->s_zmap_blocks = ms->s_zmap_blocks;
	sbi->s_firstdatazone = ms->s_firstdatazone;
	sbi->s_log_zone_size = ms->s_log_zone_size;
	s->s_maxbytes = ms->s_max_size;
	s->s_magic = ms->s_magic;
	if (s->s_magic == MINIX_SUPER_MAGIC) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V1;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 16;
		sbi->s_namelen = 14;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if (s->s_magic == MINIX2_SUPER_MAGIC2) {
		sbi->s_version = MINIX_V2;
		sbi->s_nzones = ms->s_zones;
		sbi->s_dirsize = 32;
		sbi->s_namelen = 30;
		s->s_max_links = MINIX2_LINK_MAX;
	} else if ( *(__u16 *)(bh->b_data + 24) == MINIX3_SUPER_MAGIC) {
		m3s = (struct minix3_super_block *) bh->b_data;
		s->s_magic = m3s->s_magic;
		sbi->s_imap_blocks = m3s->s_imap_blocks;
		sbi->s_zmap_blocks = m3s->s_zmap_blocks;
		sbi->s_firstdatazone = m3s->s_firstdatazone;
		sbi->s_log_zone_size = m3s->s_log_zone_size;
		s->s_maxbytes = m3s->s_max_size;
		sbi->s_ninodes = m3s->s_ninodes;
		sbi->s_nzones = m3s->s_zones;
		sbi->s_dirsize = 64;
		sbi->s_namelen = 60;
		sbi->s_version = MINIX_V3;
		sbi->s_mount_state = MINIX_VALID_FS;
		sb_set_blocksize(s, m3s->s_blocksize);
		s->s_max_links = MINIX2_LINK_MAX;
	} else
		goto out_no_fs;

	if (!minix_check_superblock(s))
		goto out_illegal_sb;

	/*
	 * Allocate the buffer map to keep the superblock small.
	 */
	i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
	map = kzalloc(i, GFP_KERNEL);
	if (!map)
		goto out_no_map;
	sbi->s_imap = &map[0];
	sbi->s_zmap = &map[sbi->s_imap_blocks];

	block=2;
	for (i=0 ; i < sbi->s_imap_blocks ; i++) {
		if (!(sbi->s_imap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}
	for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
		if (!(sbi->s_zmap[i]=sb_bread(s, block)))
			goto out_no_bitmap;
		block++;
	}

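	/*
	 * Bit 0 of each bitmap acts as a sentinel and is kept set so the
	 * free-bit searches in the inode and block allocators never hand
	 * out slot 0.
	 */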
	minix_set_bit(0,sbi->s_imap[0]->b_data);
	minix_set_bit(0,sbi->s_zmap[0]->b_data);

	/* Apparently minix can create filesystems that allocate more blocks for
	 * the bitmaps than needed. We simply ignore that, but verify it didn't
	 * create one with not enough blocks and bail out if so.
	 */
	block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize);
	if (sbi->s_imap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"imap blocks allocated. Refusing to mount.\n");
		goto out_no_bitmap;
	}

	block = minix_blocks_needed(
			(sbi->s_nzones - sbi->s_firstdatazone + 1),
			s->s_blocksize);
	if (sbi->s_zmap_blocks < block) {
		printk("MINIX-fs: file system does not have enough "
				"zmap blocks allocated. Refusing to mount.\n");
		goto out_no_bitmap;
	}

	/* set up enough so that it can read an inode */
	s->s_op = &minix_sops;
	s->s_time_min = 0;
	s->s_time_max = U32_MAX;
	root_inode = minix_iget(s, MINIX_ROOT_INO);
	if (IS_ERR(root_inode)) {
		ret = PTR_ERR(root_inode);
		goto out_no_root;
	}

	ret = -ENOMEM;
	s->s_root = d_make_root(root_inode);
	if (!s->s_root)
		goto out_no_root;

	if (!sb_rdonly(s)) {
		if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
			ms->s_state &= ~MINIX_VALID_FS;
		mark_buffer_dirty(bh);
	}
	if (!(sbi->s_mount_state & MINIX_VALID_FS))
		printk("MINIX-fs: mounting unchecked file system, "
			"running fsck is recommended\n");
	else if (sbi->s_mount_state & MINIX_ERROR_FS)
		printk("MINIX-fs: mounting file system with errors, "
			"running fsck is recommended\n");

	return 0;

out_no_root:
	if (!silent)
		printk("MINIX-fs: get root inode failed\n");
	goto out_freemap;

out_no_bitmap:
	printk("MINIX-fs: bad superblock or unable to read bitmaps\n");
out_freemap:
	for (i = 0; i < sbi->s_imap_blocks; i++)
		brelse(sbi->s_imap[i]);
	for (i = 0; i < sbi->s_zmap_blocks; i++)
		brelse(sbi->s_zmap[i]);
	kfree(sbi->s_imap);
	goto out_release;

out_no_map:
	ret = -ENOMEM;
	if (!silent)
		printk("MINIX-fs: can't allocate map\n");
	goto out_release;

out_illegal_sb:
	if (!silent)
		printk("MINIX-fs: bad superblock\n");
	goto out_release;

out_no_fs:
	if (!silent)
		printk("VFS: Can't find a Minix filesystem V1 | V2 | V3 "
		       "on device %s.\n", s->s_id);
out_release:
	brelse(bh);
	goto out;

out_bad_hblock:
	printk("MINIX-fs: blocksize too small for device\n");
	goto out;

out_bad_sb:
	printk("MINIX-fs: unable to read superblock\n");
out:
	s->s_fs_info = NULL;
	kfree(sbi);
	return ret;
}

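/*
 * Wiring for the fs_context based mount API: minix_init_fs_context()
 * installs minix_context_ops, get_tree_bdev() opens the block device and
 * calls minix_fill_super(), and minix_reconfigure() handles remounts.
 */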
static int minix_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, minix_fill_super);
}

static const struct fs_context_operations minix_context_ops = {
	.get_tree	= minix_get_tree,
	.reconfigure	= minix_reconfigure,
};

static int minix_init_fs_context(struct fs_context *fc)
{
	fc->ops = &minix_context_ops;

	return 0;
}

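/*
 * statfs: the data-block count is the number of zones past the first data
 * zone, converted to filesystem blocks by the log2 zone size; everything
 * before the first data zone (boot block, superblock, bitmaps, inode
 * table) is metadata and not reported as capacity.
 */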
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = (sbi->s_nzones - sbi->s_firstdatazone) << sbi->s_log_zone_size;
	buf->f_bfree = minix_count_free_blocks(sb);
	buf->f_bavail = buf->f_bfree;
	buf->f_files = sbi->s_ninodes;
	buf->f_ffree = minix_count_free_inodes(sb);
	buf->f_namelen = sbi->s_namelen;
	buf->f_fsid = u64_to_fsid(id);

	return 0;
}

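/*
 * minix_get_block() is the block-mapping callback handed to the generic
 * buffer/folio helpers below (block_read_full_folio, mpage_writepages,
 * block_write_begin, generic_block_bmap); it dispatches to the V1 or
 * V2/V3 zone-pointer walker depending on the inode version.
 */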
static int minix_get_block(struct inode *inode, sector_t block,
		    struct buffer_head *bh_result, int create)
{
	if (INODE_VERSION(inode) == MINIX_V1)
		return V1_minix_get_block(inode, block, bh_result, create);
	else
		return V2_minix_get_block(inode, block, bh_result, create);
}

static int minix_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, minix_get_block);
}

static int minix_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, minix_get_block);
}

int minix_prepare_chunk(struct folio *folio, loff_t pos, unsigned len)
{
	return __block_write_begin(folio, pos, len, minix_get_block);
}

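/*
 * If write_begin fails, blocks may already have been instantiated past
 * i_size; trim the page cache and truncate the inode back so no stray
 * allocations are left behind.
 */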
static void minix_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		minix_truncate(inode);
	}
}

static int minix_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len,
			struct folio **foliop, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, foliop, minix_get_block);
	if (unlikely(ret))
		minix_write_failed(mapping, pos + len);

	return ret;
}

static sector_t minix_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,minix_get_block);
}

static const struct address_space_operations minix_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= minix_read_folio,
	.writepages	= minix_writepages,
	.write_begin	= minix_write_begin,
	.write_end	= generic_write_end,
	.migrate_folio	= buffer_migrate_folio,
	.bmap		= minix_bmap,
	.direct_IO	= noop_direct_IO
};

static const struct inode_operations minix_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= minix_getattr,
};

void minix_set_inode(struct inode *inode, dev_t rdev)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &minix_file_inode_operations;
		inode->i_fop = &minix_file_operations;
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &minix_dir_inode_operations;
		inode->i_fop = &minix_dir_operations;
		inode->i_mapping->a_ops = &minix_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &minix_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &minix_aops;
	} else
		init_special_inode(inode, inode->i_mode, rdev);
}

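/*
 * On-disk inode formats: a V1 inode is 32 bytes with nine 16-bit zone
 * pointers and a single timestamp shared by atime/mtime/ctime; a V2/V3
 * inode is 64 bytes with ten 32-bit zone pointers and separate access,
 * modification and change times.
 */
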
/*
 * The minix V1 function to read an inode.
 */
static struct inode *V1_minix_iget(struct inode *inode)
{
	struct buffer_head * bh;
	struct minix_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	if (raw_inode->i_nlinks == 0) {
		printk("MINIX-fs: deleted inode referenced: %lu\n",
		       inode->i_ino);
		brelse(bh);
		iget_failed(inode);
		return ERR_PTR(-ESTALE);
	}
	inode->i_mode = raw_inode->i_mode;
	i_uid_write(inode, raw_inode->i_uid);
	i_gid_write(inode, raw_inode->i_gid);
	set_nlink(inode, raw_inode->i_nlinks);
	inode->i_size = raw_inode->i_size;
	inode_set_mtime_to_ts(inode,
			inode_set_atime_to_ts(inode, inode_set_ctime(inode, raw_inode->i_time, 0)));
	inode->i_blocks = 0;
	for (i = 0; i < 9; i++)
		minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

/*
 * The minix V2 function to read an inode.
 */
static struct inode *V2_minix_iget(struct inode *inode)
{
	struct buffer_head * bh;
	struct minix2_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	if (raw_inode->i_nlinks == 0) {
		printk("MINIX-fs: deleted inode referenced: %lu\n",
		       inode->i_ino);
		brelse(bh);
		iget_failed(inode);
		return ERR_PTR(-ESTALE);
	}
	inode->i_mode = raw_inode->i_mode;
	i_uid_write(inode, raw_inode->i_uid);
	i_gid_write(inode, raw_inode->i_gid);
	set_nlink(inode, raw_inode->i_nlinks);
	inode->i_size = raw_inode->i_size;
	inode_set_mtime(inode, raw_inode->i_mtime, 0);
	inode_set_atime(inode, raw_inode->i_atime, 0);
	inode_set_ctime(inode, raw_inode->i_ctime, 0);
	inode->i_blocks = 0;
	for (i = 0; i < 10; i++)
		minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
	minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
	brelse(bh);
	unlock_new_inode(inode);
	return inode;
}

/*
 * The global function to read an inode.
 */
struct inode *minix_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	if (INODE_VERSION(inode) == MINIX_V1)
		return V1_minix_iget(inode);
	else
		return V2_minix_iget(inode);
}

/*
 * The minix V1 function to synchronize an inode.
 */
static struct buffer_head * V1_minix_update_inode(struct inode * inode)
{
	struct buffer_head * bh;
	struct minix_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode)
		return NULL;
	raw_inode->i_mode = inode->i_mode;
	raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
	raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
	raw_inode->i_nlinks = inode->i_nlink;
	raw_inode->i_size = inode->i_size;
	raw_inode->i_time = inode_get_mtime_sec(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
	else for (i = 0; i < 9; i++)
		raw_inode->i_zone[i] = minix_inode->u.i1_data[i];
	mark_buffer_dirty(bh);
	return bh;
}

/*
 * The minix V2 function to synchronize an inode.
 */
static struct buffer_head * V2_minix_update_inode(struct inode * inode)
{
	struct buffer_head * bh;
	struct minix2_inode * raw_inode;
	struct minix_inode_info *minix_inode = minix_i(inode);
	int i;

	raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
	if (!raw_inode)
		return NULL;
	raw_inode->i_mode = inode->i_mode;
	raw_inode->i_uid = fs_high2lowuid(i_uid_read(inode));
	raw_inode->i_gid = fs_high2lowgid(i_gid_read(inode));
	raw_inode->i_nlinks = inode->i_nlink;
	raw_inode->i_size = inode->i_size;
	raw_inode->i_mtime = inode_get_mtime_sec(inode);
	raw_inode->i_atime = inode_get_atime_sec(inode);
	raw_inode->i_ctime = inode_get_ctime_sec(inode);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_zone[0] = old_encode_dev(inode->i_rdev);
	else for (i = 0; i < 10; i++)
		raw_inode->i_zone[i] = minix_inode->u.i2_data[i];
	mark_buffer_dirty(bh);
	return bh;
}

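/*
 * Write an in-core inode back to its on-disk slot.  For data-integrity
 * writeback (WB_SYNC_ALL) the buffer holding the inode is synced
 * immediately and any I/O error is reported to the caller.
 */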
static int minix_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err = 0;
	struct buffer_head *bh;

	if (INODE_VERSION(inode) == MINIX_V1)
		bh = V1_minix_update_inode(inode);
	else
		bh = V2_minix_update_inode(inode);
	if (!bh)
		return -EIO;
	if (wbc->sync_mode == WB_SYNC_ALL && buffer_dirty(bh)) {
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh)) {
			printk("IO error syncing minix inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	brelse (bh);
	return err;
}

int minix_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct super_block *sb = path->dentry->d_sb;
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
	if (INODE_VERSION(inode) == MINIX_V1)
		stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb);
	else
		stat->blocks = (sb->s_blocksize / 512) * V2_minix_blocks(stat->size, sb);
	stat->blksize = sb->s_blocksize;
	return 0;
}

/*
 * The function that is called for file truncation.
 */
void minix_truncate(struct inode * inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
		return;
	if (INODE_VERSION(inode) == MINIX_V1)
		V1_minix_truncate(inode);
	else
		V2_minix_truncate(inode);
}

static struct file_system_type minix_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "minix",
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
	.init_fs_context = minix_init_fs_context,
};
MODULE_ALIAS_FS("minix");

static int __init init_minix_fs(void)
{
	int err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&minix_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	return err;
}

static void __exit exit_minix_fs(void)
{
	unregister_filesystem(&minix_fs_type);
	destroy_inodecache();
}

module_init(init_minix_fs)
module_exit(exit_minix_fs)
MODULE_DESCRIPTION("Minix file system");
MODULE_LICENSE("GPL");