f2fs-fix-5.19
This includes some urgent fixes to avoid generating corrupted inodes caused
by compressed and inline_data files. In addition, another patch avoids a
wrong error report which prevents a roll-forward recovery.

Merge tag 'f2fs-for-5.19-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs fixes from Jaegeuk Kim:
 "Some urgent fixes to avoid generating corrupted inodes caused by
  compressed and inline_data files. In addition, avoid a wrong error
  report which prevents a roll-forward recovery"

* tag 'f2fs-for-5.19-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs:
  f2fs: do not count ENOENT for error case
  f2fs: fix iostat related lock protection
  f2fs: attach inline_data after setting compression
commit 29eeafc661
diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -91,8 +91,9 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
 	unsigned int cnt;
 	struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
 	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
+	unsigned long flags;
 
-	spin_lock_bh(&sbi->iostat_lat_lock);
+	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
 	for (idx = 0; idx < MAX_IO_TYPE; idx++) {
 		for (io = 0; io < NR_PAGE_TYPE; io++) {
 			cnt = io_lat->bio_cnt[idx][io];
@@ -106,7 +107,7 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
 			io_lat->bio_cnt[idx][io] = 0;
 		}
 	}
-	spin_unlock_bh(&sbi->iostat_lat_lock);
+	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
 
 	trace_f2fs_iostat_latency(sbi, iostat_lat);
 }
@@ -115,14 +116,15 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
 {
 	unsigned long long iostat_diff[NR_IO_TYPE];
 	int i;
+	unsigned long flags;
 
 	if (time_is_after_jiffies(sbi->iostat_next_period))
 		return;
 
 	/* Need double check under the lock */
-	spin_lock_bh(&sbi->iostat_lock);
+	spin_lock_irqsave(&sbi->iostat_lock, flags);
 	if (time_is_after_jiffies(sbi->iostat_next_period)) {
-		spin_unlock_bh(&sbi->iostat_lock);
+		spin_unlock_irqrestore(&sbi->iostat_lock, flags);
 		return;
 	}
 	sbi->iostat_next_period = jiffies +
@@ -133,7 +135,7 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
 			sbi->prev_rw_iostat[i];
 		sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
 	}
-	spin_unlock_bh(&sbi->iostat_lock);
+	spin_unlock_irqrestore(&sbi->iostat_lock, flags);
 
 	trace_f2fs_iostat(sbi, iostat_diff);
 
@@ -145,25 +147,27 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
 	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
 	int i;
 
-	spin_lock_bh(&sbi->iostat_lock);
+	spin_lock_irq(&sbi->iostat_lock);
 	for (i = 0; i < NR_IO_TYPE; i++) {
 		sbi->rw_iostat[i] = 0;
 		sbi->prev_rw_iostat[i] = 0;
 	}
-	spin_unlock_bh(&sbi->iostat_lock);
+	spin_unlock_irq(&sbi->iostat_lock);
 
-	spin_lock_bh(&sbi->iostat_lat_lock);
+	spin_lock_irq(&sbi->iostat_lat_lock);
 	memset(io_lat, 0, sizeof(struct iostat_lat_info));
-	spin_unlock_bh(&sbi->iostat_lat_lock);
+	spin_unlock_irq(&sbi->iostat_lat_lock);
 }
 
 void f2fs_update_iostat(struct f2fs_sb_info *sbi,
 			enum iostat_type type, unsigned long long io_bytes)
 {
+	unsigned long flags;
+
 	if (!sbi->iostat_enable)
 		return;
 
-	spin_lock_bh(&sbi->iostat_lock);
+	spin_lock_irqsave(&sbi->iostat_lock, flags);
 	sbi->rw_iostat[type] += io_bytes;
 
 	if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
@@ -172,7 +176,7 @@ void f2fs_update_iostat(struct f2fs_sb_info *sbi,
 	if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
 		sbi->rw_iostat[APP_READ_IO] += io_bytes;
 
-	spin_unlock_bh(&sbi->iostat_lock);
+	spin_unlock_irqrestore(&sbi->iostat_lock, flags);
 
 	f2fs_record_iostat(sbi);
 }
@@ -185,6 +189,7 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
 	struct f2fs_sb_info *sbi = iostat_ctx->sbi;
 	struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
 	int idx;
+	unsigned long flags;
 
 	if (!sbi->iostat_enable)
 		return;
@@ -202,12 +207,12 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
 		idx = WRITE_ASYNC_IO;
 	}
 
-	spin_lock_bh(&sbi->iostat_lat_lock);
+	spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
 	io_lat->sum_lat[idx][iotype] += ts_diff;
 	io_lat->bio_cnt[idx][iotype]++;
 	if (ts_diff > io_lat->peak_lat[idx][iotype])
 		io_lat->peak_lat[idx][iotype] = ts_diff;
-	spin_unlock_bh(&sbi->iostat_lat_lock);
+	spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
 }
 
 void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
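The hunks above switch the iostat locks from the _bh spinlock variants to irqsave/irqrestore (and to plain _irq in f2fs_reset_iostat(), which is called from process context), presumably because the counters can also be updated from bio-completion/interrupt context where disabling softirqs alone is not enough. Below is a minimal kernel-style sketch of that pattern; stats_lock, stats_add() and stats_reset() are hypothetical names, not f2fs symbols.

/*
 * Minimal sketch of the irqsave locking pattern applied above.
 * stats_lock, stats_add() and stats_reset() are hypothetical names,
 * not f2fs symbols.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(stats_lock);
static u64 stats_counter;

/* Callable from any context: disabling local interrupts while the
 * lock is held prevents an interrupt handler on the same CPU from
 * re-taking the lock and deadlocking. */
static void stats_add(u64 bytes)
{
	unsigned long flags;

	spin_lock_irqsave(&stats_lock, flags);
	stats_counter += bytes;
	spin_unlock_irqrestore(&stats_lock, flags);
}

/* Known process context (e.g. a sysfs handler): plain _irq is enough,
 * mirroring the f2fs_reset_iostat() hunk. */
static void stats_reset(void)
{
	spin_lock_irq(&stats_lock);
	stats_counter = 0;
	spin_unlock_irq(&stats_lock);
}

spin_lock_irqsave() saves and restores the caller's interrupt state, so it stays correct even when the caller may already have interrupts disabled.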
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -89,8 +89,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
 	if (test_opt(sbi, INLINE_XATTR))
 		set_inode_flag(inode, FI_INLINE_XATTR);
 
-	if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
-		set_inode_flag(inode, FI_INLINE_DATA);
 	if (f2fs_may_inline_dentry(inode))
 		set_inode_flag(inode, FI_INLINE_DENTRY);
 
@@ -107,10 +105,6 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
 
 	f2fs_init_extent_tree(inode, NULL);
 
-	stat_inc_inline_xattr(inode);
-	stat_inc_inline_inode(inode);
-	stat_inc_inline_dir(inode);
-
 	F2FS_I(inode)->i_flags =
 		f2fs_mask_flags(mode, F2FS_I(dir)->i_flags & F2FS_FL_INHERITED);
 
@@ -127,6 +121,14 @@ static struct inode *f2fs_new_inode(struct user_namespace *mnt_userns,
 			set_compress_context(inode);
 	}
 
+	/* Should enable inline_data after compression set */
+	if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+		set_inode_flag(inode, FI_INLINE_DATA);
+
+	stat_inc_inline_xattr(inode);
+	stat_inc_inline_inode(inode);
+	stat_inc_inline_dir(inode);
+
 	f2fs_set_inode_flags(inode);
 
 	trace_f2fs_new_inode(inode, 0);
@@ -325,6 +327,9 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
 		if (!is_extension_exist(name, ext[i], false))
 			continue;
 
+		/* Do not use inline_data with compression */
+		stat_dec_inline_inode(inode);
+		clear_inode_flag(inode, FI_INLINE_DATA);
 		set_compress_context(inode);
 		return;
 	}
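The namei.c hunks move the FI_INLINE_DATA decision (and the inline stat accounting) to after the compression setup, and make set_compress_inode() clear inline_data before attaching a compression context, so a newly created inode can no longer carry both flags at once. The sketch below only illustrates that ordering rule under the assumption that inline_data and compression are mutually exclusive; new_inode_flags() and the FLAG_* names are hypothetical, not f2fs identifiers.

/*
 * Sketch of the flag-ordering rule, not the actual f2fs code.
 * new_inode_flags() and the FLAG_* names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	FLAG_COMPRESSED  = 1u << 0,
	FLAG_INLINE_DATA = 1u << 1,
};

static unsigned int new_inode_flags(bool wants_compression, bool may_inline_data)
{
	unsigned int flags = 0;

	/* Decide compression first ... */
	if (wants_compression)
		flags |= FLAG_COMPRESSED;

	/* ... then inline_data, which is treated as mutually exclusive
	 * with compression, so this check must run after the compression
	 * flag is known. */
	if (may_inline_data && !(flags & FLAG_COMPRESSED))
		flags |= FLAG_INLINE_DATA;

	return flags;
}

int main(void)
{
	/* A compressed inode never also gets the inline_data flag. */
	printf("compressed file:    flags=%u\n", new_inode_flags(true, true));
	printf("small regular file: flags=%u\n", new_inode_flags(false, true));
	return 0;
}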
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1450,7 +1450,9 @@ page_hit:
 out_err:
 	ClearPageUptodate(page);
 out_put_err:
-	f2fs_handle_page_eio(sbi, page->index, NODE);
+	/* ENOENT comes from read_node_page which is not an error. */
+	if (err != -ENOENT)
+		f2fs_handle_page_eio(sbi, page->index, NODE);
 	f2fs_put_page(page, 1);
 	return ERR_PTR(err);
 }