2017-12-18 03:00:59 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
2008-04-29 22:13:32 +00:00
|
|
|
* ext4.h
|
2006-10-11 08:20:50 +00:00
|
|
|
*
|
|
|
|
* Copyright (C) 1992, 1993, 1994, 1995
|
|
|
|
* Remy Card (card@masi.ibp.fr)
|
|
|
|
* Laboratoire MASI - Institut Blaise Pascal
|
|
|
|
* Universite Pierre et Marie Curie (Paris VI)
|
|
|
|
*
|
|
|
|
* from
|
|
|
|
*
|
|
|
|
* linux/include/linux/minix_fs.h
|
|
|
|
*
|
|
|
|
* Copyright (C) 1991, 1992 Linus Torvalds
|
|
|
|
*/
|
|
|
|
|
2008-04-29 22:13:32 +00:00
|
|
|
#ifndef _EXT4_H
|
|
|
|
#define _EXT4_H
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2021-07-19 05:59:14 +00:00
|
|
|
#include <linux/refcount.h>
|
2006-10-11 08:20:50 +00:00
|
|
|
#include <linux/types.h>
|
2006-10-11 08:21:05 +00:00
|
|
|
#include <linux/blkdev.h>
|
2006-10-11 08:20:50 +00:00
|
|
|
#include <linux/magic.h>
|
2009-01-06 02:49:55 +00:00
|
|
|
#include <linux/jbd2.h>
|
2009-01-22 17:13:05 +00:00
|
|
|
#include <linux/quota.h>
|
2009-05-01 17:44:33 +00:00
|
|
|
#include <linux/rwsem.h>
|
|
|
|
#include <linux/rbtree.h>
|
|
|
|
#include <linux/seqlock.h>
|
|
|
|
#include <linux/mutex.h>
|
2009-05-03 20:33:44 +00:00
|
|
|
#include <linux/timer.h>
|
|
|
|
#include <linux/wait.h>
|
2017-02-02 18:15:33 +00:00
|
|
|
#include <linux/sched/signal.h>
|
2009-05-03 20:33:44 +00:00
|
|
|
#include <linux/blockgroup_lock.h>
|
|
|
|
#include <linux/percpu_counter.h>
|
2013-10-18 01:11:01 +00:00
|
|
|
#include <linux/ratelimit.h>
|
2012-04-29 22:27:10 +00:00
|
|
|
#include <crypto/hash.h>
|
2014-02-22 11:18:17 +00:00
|
|
|
#include <linux/falloc.h>
|
2016-04-26 03:22:35 +00:00
|
|
|
#include <linux/percpu-rwsem.h>
|
2020-05-23 07:30:11 +00:00
|
|
|
#include <linux/fiemap.h>
|
2010-05-17 10:00:00 +00:00
|
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/compat.h>
|
|
|
|
#endif
|
2023-04-02 02:37:42 +00:00
|
|
|
#include <uapi/linux/ext4.h>
|
2008-01-29 05:19:52 +00:00
|
|
|
|
2017-10-09 19:15:34 +00:00
|
|
|
#include <linux/fscrypt.h>
|
2019-07-22 16:26:24 +00:00
|
|
|
#include <linux/fsverity.h>
|
2017-10-09 19:15:34 +00:00
|
|
|
|
2018-08-27 05:15:11 +00:00
|
|
|
#include <linux/compiler.h>
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
2008-07-11 23:27:31 +00:00
|
|
|
* The fourth extended filesystem constants/structures
|
2006-10-11 08:20:50 +00:00
|
|
|
*/
|
|
|
|
|
2016-03-13 21:18:12 +00:00
|
|
|
/*
 * With AGGRESSIVE_CHECK defined, the allocator runs consistency checks
 * over its structures.  These checks slow things down a lot, so the
 * option is left disabled via the trailing "__"; rename the macro to
 * AGGRESSIVE_CHECK to enable it.
 */
#define AGGRESSIVE_CHECK__

/*
 * With DOUBLE_CHECK defined, mballoc creates persistent in-core
 * bitmaps, maintains them, and uses them to check for double
 * allocations.  Disabled by default (note the trailing "__").
 */
#define DOUBLE_CHECK__
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
 * Define EXT4FS_DEBUG to produce debug messages (see ext4_debug()
 * below).  It is explicitly undefined here so debug output is off by
 * default.
 */
#undef EXT4FS_DEBUG
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/*
 * Debug code.
 *
 * ext4_debug() prints a "file, line, function" prefix followed by the
 * caller's printf-style message when EXT4FS_DEBUG is defined.  When it
 * is not, the macro degrades to no_printk(), which emits nothing but
 * still type-checks the format string and arguments.
 */
#ifdef EXT4FS_DEBUG
#define ext4_debug(f, a...)						\
	do {								\
		printk(KERN_DEBUG "EXT4-fs DEBUG (%s, %d): %s:",	\
			__FILE__, __LINE__, __func__);			\
		printk(KERN_DEBUG f, ## a);				\
	} while (0)
#else
#define ext4_debug(fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif
|
|
|
|
|
2020-05-10 06:24:55 +00:00
|
|
|
/*
 * Turn on EXT_DEBUG to enable ext4_ext_show_path/leaf/move in
 * extents.c.  Disabled by default via the trailing "__".
 */
#define EXT_DEBUG__

/*
 * Dynamic printk for controlled extents debugging.
 *
 * With CONFIG_EXT4_DEBUG set, ext_debug() emits a pr_debug() line
 * tagged with the current task, the filesystem id, the inode number
 * and the source location; otherwise it degrades to no_printk() so the
 * arguments are still type-checked without generating output.
 */
#ifdef CONFIG_EXT4_DEBUG
#define ext_debug(ino, fmt, ...)					\
	pr_debug("[%s/%d] EXT4-fs (%s): ino %lu: (%s, %d): %s:" fmt,	\
		 current->comm, task_pid_nr(current),			\
		 ino->i_sb->s_id, ino->i_ino, __FILE__, __LINE__,	\
		 __func__, ##__VA_ARGS__)
#else
#define ext_debug(ino, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif
|
|
|
|
|
2020-11-07 15:58:11 +00:00
|
|
|
/*
 * ASSERT(): if the condition is false, log an emergency-level message
 * identifying the failing expression and its source location, then
 * BUG().  Compiled in unconditionally (not gated on a debug option).
 */
#define ASSERT(assert)						\
do {									\
	if (unlikely(!(assert))) {					\
		printk(KERN_EMERG					\
		       "Assertion failure in %s() at %s:%d: '%s'\n",	\
		       __func__, __FILE__, __LINE__, #assert);		\
		BUG();							\
	}								\
} while (0)
|
|
|
|
|
2009-05-01 17:44:33 +00:00
|
|
|
/* data type for block offset of block group */
|
|
|
|
typedef int ext4_grpblk_t;
|
|
|
|
|
|
|
|
/* data type for filesystem-wide blocks number */
|
|
|
|
typedef unsigned long long ext4_fsblk_t;
|
|
|
|
|
|
|
|
/* data type for file logical block number */
|
|
|
|
typedef __u32 ext4_lblk_t;
|
|
|
|
|
|
|
|
/* data type for block group number */
|
|
|
|
typedef unsigned int ext4_group_t;
|
|
|
|
|
2015-06-09 05:55:03 +00:00
|
|
|
/*
 * Direction selector for the extent range-shifting code.
 * NOTE(review): the consumers live outside this header; confirm exact
 * semantics at the call sites.
 */
enum SHIFT_DIRECTION {
	SHIFT_LEFT = 0,
	SHIFT_RIGHT,
};
|
|
|
|
|
2023-05-30 12:33:42 +00:00
|
|
|
/*
 * For each criterion, mballoc has a slightly different way of finding
 * the required blocks, and usually, the higher the criterion the
 * slower the allocation.  We start at the lower criteria and keep
 * falling back to higher ones if we are not able to find any blocks.
 * Lower (earlier) criteria are faster.
 */
enum criteria {
	/*
	 * Used when the number of blocks needed is a power of 2.  This
	 * doesn't trigger any disk IO except prefetch and is the
	 * fastest criterion.
	 */
	CR_POWER2_ALIGNED,

	/*
	 * Tries to look up in-memory data structures to find the most
	 * suitable group that satisfies the goal request.  No disk IO
	 * except block prefetch.
	 */
	CR_GOAL_LEN_FAST,

	/*
	 * Same as CR_GOAL_LEN_FAST but is allowed to reduce the goal
	 * length to the best available length for faster allocation.
	 */
	CR_BEST_AVAIL_LEN,

	/*
	 * Reads each block group sequentially, performing disk IO if
	 * necessary, to find a suitable block group.  Tries to
	 * allocate the goal length but might trim the request if
	 * nothing is found after enough tries.
	 */
	CR_GOAL_LEN_SLOW,

	/*
	 * Finds the first free set of blocks and allocates those.
	 * This is only used in rare cases when CR_GOAL_LEN_SLOW also
	 * fails to allocate anything.
	 */
	CR_ANY_FREE,

	/*
	 * Number of criteria defined.
	 */
	EXT4_MB_NUM_CRS
};
|
|
|
|
|
2009-09-30 04:32:42 +00:00
|
|
|
/*
 * Flags used in mballoc's allocation_context flags field.
 *
 * Also used to show what's going on for debugging purposes when the
 * flags field is exported via the traceport interface.
 */

/* prefer goal again. length */
#define EXT4_MB_HINT_MERGE		0x0001
/* blocks already reserved */
#define EXT4_MB_HINT_RESERVED		0x0002
/* metadata is being allocated */
#define EXT4_MB_HINT_METADATA		0x0004
/* first blocks in the file */
#define EXT4_MB_HINT_FIRST		0x0008
/* search for the best chunk */
#define EXT4_MB_HINT_BEST		0x0010
/* data is being allocated */
#define EXT4_MB_HINT_DATA		0x0020
/* don't preallocate (for tails) */
#define EXT4_MB_HINT_NOPREALLOC		0x0040
/* allocate for locality group */
#define EXT4_MB_HINT_GROUP_ALLOC	0x0080
/* allocate goal blocks or none */
#define EXT4_MB_HINT_GOAL_ONLY		0x0100
/* goal is meaningful */
#define EXT4_MB_HINT_TRY_GOAL		0x0200
/* blocks already pre-reserved by delayed allocation */
#define EXT4_MB_DELALLOC_RESERVED	0x0400
/* We are doing stream allocation */
#define EXT4_MB_STREAM_ALLOC		0x0800
/* Use reserved root blocks if needed */
#define EXT4_MB_USE_ROOT_BLOCKS		0x1000
/* Use blocks from reserved pool */
#define EXT4_MB_USE_RESERVED		0x2000
/* Do strict check for free blocks while retrying block allocation */
#define EXT4_MB_STRICT_CHECK		0x4000
/* Large fragment size list lookup succeeded at least once for
 * CR_POWER2_ALIGNED */
#define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED	0x8000
/* Avg fragment size lookup succeeded at least once for
 * CR_GOAL_LEN_FAST */
#define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED	0x00010000
/* Avg fragment size lookup succeeded at least once for
 * CR_BEST_AVAIL_LEN */
#define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED	0x00020000
|
2023-05-30 12:33:49 +00:00
|
|
|
|
2008-01-29 05:19:52 +00:00
|
|
|
struct ext4_allocation_request {
|
|
|
|
/* target inode for block we're allocating */
|
|
|
|
struct inode *inode;
|
2009-07-13 14:24:17 +00:00
|
|
|
/* how many blocks we want to allocate */
|
|
|
|
unsigned int len;
|
2008-01-29 05:19:52 +00:00
|
|
|
/* logical block in target inode */
|
|
|
|
ext4_lblk_t logical;
|
|
|
|
/* the closest logical allocated block to the left */
|
|
|
|
ext4_lblk_t lleft;
|
|
|
|
/* the closest logical allocated block to the right */
|
|
|
|
ext4_lblk_t lright;
|
2009-07-13 14:24:17 +00:00
|
|
|
/* phys. target (a hint) */
|
|
|
|
ext4_fsblk_t goal;
|
|
|
|
/* phys. block for the closest logical allocated block to the left */
|
|
|
|
ext4_fsblk_t pleft;
|
|
|
|
/* phys. block for the closest logical allocated block to the right */
|
2008-01-29 05:19:52 +00:00
|
|
|
ext4_fsblk_t pright;
|
|
|
|
/* flags. see above EXT4_MB_HINT_* */
|
2008-11-05 05:14:04 +00:00
|
|
|
unsigned int flags;
|
2008-01-29 05:19:52 +00:00
|
|
|
};
|
|
|
|
|
2010-05-16 23:00:00 +00:00
|
|
|
/*
|
|
|
|
* Logical to physical block mapping, used by ext4_map_blocks()
|
|
|
|
*
|
|
|
|
* This structure is used to pass requests into ext4_map_blocks() as
|
|
|
|
* well as to store the information returned by ext4_map_blocks(). It
|
|
|
|
* takes less room on the stack than a struct buffer_head.
|
|
|
|
*/
|
2020-05-10 06:24:51 +00:00
|
|
|
#define EXT4_MAP_NEW BIT(BH_New)
|
|
|
|
#define EXT4_MAP_MAPPED BIT(BH_Mapped)
|
|
|
|
#define EXT4_MAP_UNWRITTEN BIT(BH_Unwritten)
|
|
|
|
#define EXT4_MAP_BOUNDARY BIT(BH_Boundary)
|
2010-05-16 23:00:00 +00:00
|
|
|
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
|
2014-11-25 16:41:49 +00:00
|
|
|
EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
|
2010-05-16 23:00:00 +00:00
|
|
|
|
|
|
|
struct ext4_map_blocks {
|
|
|
|
ext4_fsblk_t m_pblk;
|
|
|
|
ext4_lblk_t m_lblk;
|
|
|
|
unsigned int m_len;
|
|
|
|
unsigned int m_flags;
|
|
|
|
};
|
|
|
|
|
2019-08-28 15:13:24 +00:00
|
|
|
/*
|
|
|
|
* Block validity checking, system zone rbtree.
|
|
|
|
*/
|
|
|
|
struct ext4_system_blocks {
|
|
|
|
struct rb_root root;
|
|
|
|
struct rcu_head rcu;
|
|
|
|
};
|
|
|
|
|
2010-10-28 01:30:10 +00:00
|
|
|
/*
|
|
|
|
* Flags for ext4_io_end->flags
|
|
|
|
*/
|
|
|
|
#define EXT4_IO_END_UNWRITTEN 0x0001
|
|
|
|
|
2019-10-16 07:37:10 +00:00
|
|
|
struct ext4_io_end_vec {
|
|
|
|
struct list_head list; /* list of io_end_vec */
|
|
|
|
loff_t offset; /* offset in the file */
|
|
|
|
ssize_t size; /* size of the extent */
|
|
|
|
};
|
|
|
|
|
2012-03-05 15:40:22 +00:00
|
|
|
/*
|
2014-04-21 03:45:47 +00:00
|
|
|
* For converting unwritten extents on a work queue. 'handle' is used for
|
2013-06-04 17:21:11 +00:00
|
|
|
* buffered writeback.
|
2012-03-05 15:40:22 +00:00
|
|
|
*/
|
2009-09-28 19:49:08 +00:00
|
|
|
typedef struct ext4_io_end {
|
2010-07-27 15:56:06 +00:00
|
|
|
struct list_head list; /* per-file finished IO list */
|
2013-06-04 17:21:11 +00:00
|
|
|
handle_t *handle; /* handle reserved for extent
|
|
|
|
* conversion */
|
2009-09-28 19:49:08 +00:00
|
|
|
struct inode *inode; /* file being written to */
|
2013-06-04 18:23:41 +00:00
|
|
|
struct bio *bio; /* Linked list of completed
|
|
|
|
* bios covering the extent */
|
2009-09-28 19:48:29 +00:00
|
|
|
unsigned int flag; /* unwritten or not */
|
2021-07-19 05:59:14 +00:00
|
|
|
refcount_t count; /* reference counter */
|
2019-10-16 07:37:10 +00:00
|
|
|
struct list_head list_vec; /* list of ext4_io_end_vec */
|
2009-09-28 19:49:08 +00:00
|
|
|
} ext4_io_end_t;
|
|
|
|
|
2010-10-28 01:30:10 +00:00
|
|
|
struct ext4_io_submit {
|
2015-07-22 03:50:24 +00:00
|
|
|
struct writeback_control *io_wbc;
|
2010-10-28 01:30:10 +00:00
|
|
|
struct bio *io_bio;
|
|
|
|
ext4_io_end_t *io_end;
|
|
|
|
sector_t io_next_block;
|
|
|
|
};
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
 * Special inodes numbers
 */
#define	EXT4_BAD_INO		 1	/* Bad blocks inode */
#define EXT4_ROOT_INO		 2	/* Root inode */
#define EXT4_USR_QUOTA_INO	 3	/* User quota inode */
#define EXT4_GRP_QUOTA_INO	 4	/* Group quota inode */
#define EXT4_BOOT_LOADER_INO	 5	/* Boot loader inode */
#define EXT4_UNDEL_DIR_INO	 6	/* Undelete directory inode */
#define EXT4_RESIZE_INO		 7	/* Reserved group descriptors inode */
#define EXT4_JOURNAL_INO	 8	/* Journal inode */

/* First non-reserved inode for old ext4 filesystems */
#define EXT4_GOOD_OLD_FIRST_INO	11

/*
 * Maximal count of links to a file
 */
#define EXT4_LINK_MAX		65000
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/*
 * Macro-instructions used to manage several block sizes.
 *
 * The __KERNEL__ variants read cached values from the in-core
 * superblock; the userspace variants recompute from the on-disk
 * superblock fields.
 */
#define EXT4_MIN_BLOCK_SIZE		1024
#define	EXT4_MAX_BLOCK_SIZE		65536
#define EXT4_MIN_BLOCK_LOG_SIZE		10
#define EXT4_MAX_BLOCK_LOG_SIZE		16
#define EXT4_MAX_CLUSTER_LOG_SIZE	30
#ifdef __KERNEL__
# define EXT4_BLOCK_SIZE(s)		((s)->s_blocksize)
#else
# define EXT4_BLOCK_SIZE(s)		(EXT4_MIN_BLOCK_SIZE << (s)->s_log_block_size)
#endif
#define	EXT4_ADDR_PER_BLOCK(s)		(EXT4_BLOCK_SIZE(s) / sizeof(__u32))
#define EXT4_CLUSTER_SIZE(s)		(EXT4_BLOCK_SIZE(s) << \
					 EXT4_SB(s)->s_cluster_bits)
#ifdef __KERNEL__
# define EXT4_BLOCK_SIZE_BITS(s)	((s)->s_blocksize_bits)
# define EXT4_CLUSTER_BITS(s)		(EXT4_SB(s)->s_cluster_bits)
#else
# define EXT4_BLOCK_SIZE_BITS(s)	((s)->s_log_block_size + 10)
#endif
#ifdef __KERNEL__
#define	EXT4_ADDR_PER_BLOCK_BITS(s)	(EXT4_SB(s)->s_addr_per_block_bits)
#define EXT4_INODE_SIZE(s)		(EXT4_SB(s)->s_inode_size)
#define EXT4_FIRST_INO(s)		(EXT4_SB(s)->s_first_ino)
#else
#define EXT4_INODE_SIZE(s)	(((s)->s_rev_level == EXT4_GOOD_OLD_REV) ? \
				 EXT4_GOOD_OLD_INODE_SIZE : \
				 (s)->s_inode_size)
#define EXT4_FIRST_INO(s)	(((s)->s_rev_level == EXT4_GOOD_OLD_REV) ? \
				 EXT4_GOOD_OLD_FIRST_INO : \
				 (s)->s_first_ino)
#endif
/* Round a byte size up to a whole number of blocks (in bytes). */
#define EXT4_BLOCK_ALIGN(size, blkbits)		ALIGN((size), (1 << (blkbits)))
/* Number of blocks spanned by (offset, offset + size). */
#define EXT4_MAX_BLOCKS(size, offset, blkbits) \
	((EXT4_BLOCK_ALIGN(size + offset, blkbits) >> blkbits) - (offset >> \
								  blkbits))
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2011-09-09 22:44:51 +00:00
|
|
|
/* Translate a block number to a cluster number */
#define EXT4_B2C(sbi, blk)	((blk) >> (sbi)->s_cluster_bits)
/* Translate a cluster number to a block number */
#define EXT4_C2B(sbi, cluster)	((cluster) << (sbi)->s_cluster_bits)
/* Translate # of blks to # of clusters, rounding up */
#define EXT4_NUM_B2C(sbi, blks)	(((blks) + (sbi)->s_cluster_ratio - 1) >> \
				 (sbi)->s_cluster_bits)
/* Mask out the low bits to get the starting block of the cluster */
#define EXT4_PBLK_CMASK(s, pblk) ((pblk) &				\
				  ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
#define EXT4_LBLK_CMASK(s, lblk) ((lblk) &				\
				  ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
/* Fill in the low bits to get the last block of the cluster */
#define EXT4_LBLK_CFILL(sbi, lblk) ((lblk) |				\
				    ((ext4_lblk_t) (sbi)->s_cluster_ratio - 1))
/* Get the cluster offset */
#define EXT4_PBLK_COFF(s, pblk) ((pblk) &				\
				 ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
#define EXT4_LBLK_COFF(s, lblk) ((lblk) &				\
				 ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
|
2011-09-09 22:44:51 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Structure of a blocks group descriptor
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
struct ext4_group_desc
|
2006-10-11 08:20:50 +00:00
|
|
|
{
|
2007-10-16 22:38:25 +00:00
|
|
|
__le32 bg_block_bitmap_lo; /* Blocks bitmap block */
|
2007-10-16 22:38:25 +00:00
|
|
|
__le32 bg_inode_bitmap_lo; /* Inodes bitmap block */
|
|
|
|
__le32 bg_inode_table_lo; /* Inodes table block */
|
2009-01-06 03:20:24 +00:00
|
|
|
__le16 bg_free_blocks_count_lo;/* Free blocks count */
|
|
|
|
__le16 bg_free_inodes_count_lo;/* Free inodes count */
|
|
|
|
__le16 bg_used_dirs_count_lo; /* Directories count */
|
Ext4: Uninitialized Block Groups
In pass1 of e2fsck, every inode table in the fileystem is scanned and checked,
regardless of whether it is in use. This is this the most time consuming part
of the filesystem check. The unintialized block group feature can greatly
reduce e2fsck time by eliminating checking of uninitialized inodes.
With this feature, there is a a high water mark of used inodes for each block
group. Block and inode bitmaps can be uninitialized on disk via a flag in the
group descriptor to avoid reading or scanning them at e2fsck time. A checksum
of each group descriptor is used to ensure that corruption in the group
descriptor's bit flags does not cause incorrect operation.
The feature is enabled through a mkfs option
mke2fs /dev/ -O uninit_groups
A patch adding support for uninitialized block groups to e2fsprogs tools has
been posted to the linux-ext4 mailing list.
The patches have been stress tested with fsstress and fsx. In performance
tests testing e2fsck time, we have seen that e2fsck time on ext3 grows
linearly with the total number of inodes in the filesytem. In ext4 with the
uninitialized block groups feature, the e2fsck time is constant, based
solely on the number of used inodes rather than the total inode count.
Since typical ext4 filesystems only use 1-10% of their inodes, this feature can
greatly reduce e2fsck time for users. With performance improvement of 2-20
times, depending on how full the filesystem is.
The attached graph shows the major improvements in e2fsck times in filesystems
with a large total inode count, but few inodes in use.
In each group descriptor if we have
EXT4_BG_INODE_UNINIT set in bg_flags:
Inode table is not initialized/used in this group. So we can skip
the consistency check during fsck.
EXT4_BG_BLOCK_UNINIT set in bg_flags:
No block in the group is used. So we can skip the block bitmap
verification for this group.
We also add two new fields to group descriptor as a part of
uninitialized group patch.
__le16 bg_itable_unused; /* Unused inodes count */
__le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
bg_itable_unused:
If we have EXT4_BG_INODE_UNINIT not set in bg_flags
then bg_itable_unused will give the offset within
the inode table till the inodes are used. This can be
used by fsck to skip list of inodes that are marked unused.
bg_checksum:
Now that we depend on bg_flags and bg_itable_unused to determine
the block and inode usage, we need to make sure group descriptor
is not corrupt. We add checksum to group descriptor to
detect corruption. If the descriptor is found to be corrupt, we
mark all the blocks and inodes in the group used.
Signed-off-by: Avantika Mathur <mathur@us.ibm.com>
Signed-off-by: Andreas Dilger <adilger@clusterfs.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
2007-10-16 22:38:25 +00:00
|
|
|
__le16 bg_flags; /* EXT4_BG_flags (INODE_UNINIT, etc) */
|
2012-04-29 22:23:10 +00:00
|
|
|
__le32 bg_exclude_bitmap_lo; /* Exclude bitmap for snapshots */
|
|
|
|
__le16 bg_block_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+bbitmap) LE */
|
|
|
|
__le16 bg_inode_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+ibitmap) LE */
|
2009-01-06 03:20:24 +00:00
|
|
|
__le16 bg_itable_unused_lo; /* Unused inodes count */
|
Ext4: Uninitialized Block Groups
In pass1 of e2fsck, every inode table in the fileystem is scanned and checked,
regardless of whether it is in use. This is this the most time consuming part
of the filesystem check. The unintialized block group feature can greatly
reduce e2fsck time by eliminating checking of uninitialized inodes.
With this feature, there is a a high water mark of used inodes for each block
group. Block and inode bitmaps can be uninitialized on disk via a flag in the
group descriptor to avoid reading or scanning them at e2fsck time. A checksum
of each group descriptor is used to ensure that corruption in the group
descriptor's bit flags does not cause incorrect operation.
The feature is enabled through a mkfs option
mke2fs /dev/ -O uninit_groups
A patch adding support for uninitialized block groups to e2fsprogs tools has
been posted to the linux-ext4 mailing list.
The patches have been stress tested with fsstress and fsx. In performance
tests testing e2fsck time, we have seen that e2fsck time on ext3 grows
linearly with the total number of inodes in the filesytem. In ext4 with the
uninitialized block groups feature, the e2fsck time is constant, based
solely on the number of used inodes rather than the total inode count.
Since typical ext4 filesystems only use 1-10% of their inodes, this feature can
greatly reduce e2fsck time for users. With performance improvement of 2-20
times, depending on how full the filesystem is.
The attached graph shows the major improvements in e2fsck times in filesystems
with a large total inode count, but few inodes in use.
In each group descriptor if we have
EXT4_BG_INODE_UNINIT set in bg_flags:
Inode table is not initialized/used in this group. So we can skip
the consistency check during fsck.
EXT4_BG_BLOCK_UNINIT set in bg_flags:
No block in the group is used. So we can skip the block bitmap
verification for this group.
We also add two new fields to group descriptor as a part of
uninitialized group patch.
__le16 bg_itable_unused; /* Unused inodes count */
__le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
bg_itable_unused:
If we have EXT4_BG_INODE_UNINIT not set in bg_flags
then bg_itable_unused will give the offset within
the inode table till the inodes are used. This can be
used by fsck to skip list of inodes that are marked unused.
bg_checksum:
Now that we depend on bg_flags and bg_itable_unused to determine
the block and inode usage, we need to make sure group descriptor
is not corrupt. We add checksum to group descriptor to
detect corruption. If the descriptor is found to be corrupt, we
mark all the blocks and inodes in the group used.
Signed-off-by: Avantika Mathur <mathur@us.ibm.com>
Signed-off-by: Andreas Dilger <adilger@clusterfs.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
2007-10-16 22:38:25 +00:00
|
|
|
__le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
|
2006-10-11 08:21:15 +00:00
|
|
|
__le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */
|
|
|
|
__le32 bg_inode_bitmap_hi; /* Inodes bitmap block MSB */
|
|
|
|
__le32 bg_inode_table_hi; /* Inodes table block MSB */
|
2008-01-29 04:58:27 +00:00
|
|
|
__le16 bg_free_blocks_count_hi;/* Free blocks count MSB */
|
|
|
|
__le16 bg_free_inodes_count_hi;/* Free inodes count MSB */
|
|
|
|
__le16 bg_used_dirs_count_hi; /* Directories count MSB */
|
2009-01-06 03:20:24 +00:00
|
|
|
__le16 bg_itable_unused_hi; /* Unused inodes count MSB */
|
2012-04-29 22:23:10 +00:00
|
|
|
__le32 bg_exclude_bitmap_hi; /* Exclude bitmap block MSB */
|
|
|
|
__le16 bg_block_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+bbitmap) BE */
|
|
|
|
__le16 bg_inode_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+ibitmap) BE */
|
|
|
|
__u32 bg_reserved;
|
2006-10-11 08:20:50 +00:00
|
|
|
};
|
|
|
|
|
2012-04-29 22:33:10 +00:00
|
|
|
#define EXT4_BG_INODE_BITMAP_CSUM_HI_END \
|
|
|
|
(offsetof(struct ext4_group_desc, bg_inode_bitmap_csum_hi) + \
|
|
|
|
sizeof(__le16))
|
|
|
|
#define EXT4_BG_BLOCK_BITMAP_CSUM_HI_END \
|
|
|
|
(offsetof(struct ext4_group_desc, bg_block_bitmap_csum_hi) + \
|
|
|
|
sizeof(__le16))
|
|
|
|
|
2008-07-11 23:27:31 +00:00
|
|
|
/*
|
|
|
|
* Structure of a flex block group info
|
|
|
|
*/
|
|
|
|
|
|
|
|
struct flex_groups {
|
2013-03-12 03:39:59 +00:00
|
|
|
atomic64_t free_clusters;
|
|
|
|
atomic_t free_inodes;
|
|
|
|
atomic_t used_dirs;
|
2008-07-11 23:27:31 +00:00
|
|
|
};
|
|
|
|
|
Ext4: Uninitialized Block Groups
In pass1 of e2fsck, every inode table in the fileystem is scanned and checked,
regardless of whether it is in use. This is this the most time consuming part
of the filesystem check. The unintialized block group feature can greatly
reduce e2fsck time by eliminating checking of uninitialized inodes.
With this feature, there is a a high water mark of used inodes for each block
group. Block and inode bitmaps can be uninitialized on disk via a flag in the
group descriptor to avoid reading or scanning them at e2fsck time. A checksum
of each group descriptor is used to ensure that corruption in the group
descriptor's bit flags does not cause incorrect operation.
The feature is enabled through a mkfs option
mke2fs /dev/ -O uninit_groups
A patch adding support for uninitialized block groups to e2fsprogs tools has
been posted to the linux-ext4 mailing list.
The patches have been stress tested with fsstress and fsx. In performance
tests testing e2fsck time, we have seen that e2fsck time on ext3 grows
linearly with the total number of inodes in the filesytem. In ext4 with the
uninitialized block groups feature, the e2fsck time is constant, based
solely on the number of used inodes rather than the total inode count.
Since typical ext4 filesystems only use 1-10% of their inodes, this feature can
greatly reduce e2fsck time for users. With performance improvement of 2-20
times, depending on how full the filesystem is.
The attached graph shows the major improvements in e2fsck times in filesystems
with a large total inode count, but few inodes in use.
In each group descriptor if we have
EXT4_BG_INODE_UNINIT set in bg_flags:
Inode table is not initialized/used in this group. So we can skip
the consistency check during fsck.
EXT4_BG_BLOCK_UNINIT set in bg_flags:
No block in the group is used. So we can skip the block bitmap
verification for this group.
We also add two new fields to group descriptor as a part of
uninitialized group patch.
__le16 bg_itable_unused; /* Unused inodes count */
__le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
bg_itable_unused:
If we have EXT4_BG_INODE_UNINIT not set in bg_flags
then bg_itable_unused will give the offset within
the inode table till the inodes are used. This can be
used by fsck to skip list of inodes that are marked unused.
bg_checksum:
Now that we depend on bg_flags and bg_itable_unused to determine
the block and inode usage, we need to make sure group descriptor
is not corrupt. We add checksum to group descriptor to
detect corruption. If the descriptor is found to be corrupt, we
mark all the blocks and inodes in the group used.
Signed-off-by: Avantika Mathur <mathur@us.ibm.com>
Signed-off-by: Andreas Dilger <adilger@clusterfs.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
2007-10-16 22:38:25 +00:00
|
|
|
#define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */
|
|
|
|
#define EXT4_BG_BLOCK_UNINIT 0x0002 /* Block bitmap not in use */
|
|
|
|
#define EXT4_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Macro-instructions used to manage group descriptors
|
|
|
|
*/
|
2006-10-11 08:21:14 +00:00
|
|
|
#define EXT4_MIN_DESC_SIZE 32
|
2006-10-11 08:21:15 +00:00
|
|
|
#define EXT4_MIN_DESC_SIZE_64BIT 64
|
2006-10-11 08:21:14 +00:00
|
|
|
#define EXT4_MAX_DESC_SIZE EXT4_MIN_BLOCK_SIZE
|
|
|
|
#define EXT4_DESC_SIZE(s) (EXT4_SB(s)->s_desc_size)
|
2006-10-11 08:20:50 +00:00
|
|
|
#ifdef __KERNEL__
|
2006-10-11 08:20:53 +00:00
|
|
|
# define EXT4_BLOCKS_PER_GROUP(s) (EXT4_SB(s)->s_blocks_per_group)
|
2011-09-09 22:34:51 +00:00
|
|
|
# define EXT4_CLUSTERS_PER_GROUP(s) (EXT4_SB(s)->s_clusters_per_group)
|
2006-10-11 08:20:53 +00:00
|
|
|
# define EXT4_DESC_PER_BLOCK(s) (EXT4_SB(s)->s_desc_per_block)
|
|
|
|
# define EXT4_INODES_PER_GROUP(s) (EXT4_SB(s)->s_inodes_per_group)
|
|
|
|
# define EXT4_DESC_PER_BLOCK_BITS(s) (EXT4_SB(s)->s_desc_per_block_bits)
|
2006-10-11 08:20:50 +00:00
|
|
|
#else
|
2006-10-11 08:20:53 +00:00
|
|
|
# define EXT4_BLOCKS_PER_GROUP(s) ((s)->s_blocks_per_group)
|
2006-10-11 08:21:14 +00:00
|
|
|
# define EXT4_DESC_PER_BLOCK(s) (EXT4_BLOCK_SIZE(s) / EXT4_DESC_SIZE(s))
|
2006-10-11 08:20:53 +00:00
|
|
|
# define EXT4_INODES_PER_GROUP(s) ((s)->s_inodes_per_group)
|
2006-10-11 08:20:50 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Constants relative to the data blocks
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_NDIR_BLOCKS 12
|
|
|
|
#define EXT4_IND_BLOCK EXT4_NDIR_BLOCKS
|
|
|
|
#define EXT4_DIND_BLOCK (EXT4_IND_BLOCK + 1)
|
|
|
|
#define EXT4_TIND_BLOCK (EXT4_DIND_BLOCK + 1)
|
|
|
|
#define EXT4_N_BLOCKS (EXT4_TIND_BLOCK + 1)
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Inode flags
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_SECRM_FL 0x00000001 /* Secure deletion */
|
|
|
|
#define EXT4_UNRM_FL 0x00000002 /* Undelete */
|
|
|
|
#define EXT4_COMPR_FL 0x00000004 /* Compress file */
|
|
|
|
#define EXT4_SYNC_FL 0x00000008 /* Synchronous updates */
|
|
|
|
#define EXT4_IMMUTABLE_FL 0x00000010 /* Immutable file */
|
|
|
|
#define EXT4_APPEND_FL 0x00000020 /* writes to file may only append */
|
|
|
|
#define EXT4_NODUMP_FL 0x00000040 /* do not dump file */
|
|
|
|
#define EXT4_NOATIME_FL 0x00000080 /* do not update atime */
|
2006-10-11 08:20:50 +00:00
|
|
|
/* Reserved for compression usage... */
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_DIRTY_FL 0x00000100
|
|
|
|
#define EXT4_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */
|
|
|
|
#define EXT4_NOCOMPR_FL 0x00000400 /* Don't compress */
|
2015-01-19 21:00:58 +00:00
|
|
|
/* nb: was previously EXT2_ECOMPR_FL */
|
|
|
|
#define EXT4_ENCRYPT_FL 0x00000800 /* encrypted file */
|
2006-10-11 08:20:50 +00:00
|
|
|
/* End compression flags --- maybe not all used */
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_INDEX_FL 0x00001000 /* hash-indexed directory */
|
|
|
|
#define EXT4_IMAGIC_FL 0x00002000 /* AFS directory */
|
|
|
|
#define EXT4_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */
|
|
|
|
#define EXT4_NOTAIL_FL 0x00008000 /* file tail should not be merged */
|
|
|
|
#define EXT4_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
|
|
|
|
#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
|
2008-01-29 04:58:27 +00:00
|
|
|
#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
|
2006-10-11 08:21:03 +00:00
|
|
|
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
|
2019-07-22 16:26:24 +00:00
|
|
|
#define EXT4_VERITY_FL 0x00100000 /* Verity protected inode */
|
2010-01-25 08:31:32 +00:00
|
|
|
#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
|
ext4: remove EXT4_EOFBLOCKS_FL and associated code
The EXT4_EOFBLOCKS_FL inode flag is used to indicate whether a file
contains unwritten blocks past i_size. It's set when ext4_fallocate
is called with the KEEP_SIZE flag to extend a file with an unwritten
extent. However, this flag hasn't been useful functionally since
March, 2012, when a decision was made to remove it from ext4.
All traces of EXT4_EOFBLOCKS_FL were removed from e2fsprogs version
1.42.2 by commit 010dc7b90d97 ("e2fsck: remove EXT4_EOFBLOCKS_FL flag
handling") at that time. Now that enough time has passed to make
e2fsprogs versions containing this modification common, this patch now
removes the code associated with EXT4_EOFBLOCKS_FL from the kernel as
well.
This change has two implications. First, because pre-1.42.2 e2fsck
versions only look for a problem if EXT4_EOFBLOCKS_FL is set, and
because that bit will never be set by newer kernels containing this
patch, old versions of e2fsck won't have a compatibility problem with
files created by newer kernels.
Second, newer kernels will not clear EXT4_EOFBLOCKS_FL inode flag bits
belonging to a file written by an older kernel. If set, it will remain
in that state until the file is deleted. Because e2fsck versions since
1.42.2 don't check the flag at all, no adverse effect is expected.
However, pre-1.42.2 e2fsck versions that do check the flag may report
that it is set when it ought not to be after a file has been truncated
or had its unwritten blocks written. In this case, the old version of
e2fsck will offer to clear the flag. No adverse effect would then
occur whether the user chooses to clear the flag or not.
Signed-off-by: Eric Whitney <enwlinux@gmail.com>
Link: https://lore.kernel.org/r/20200211210216.24960-1-enwlinux@gmail.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2020-02-11 21:02:16 +00:00
|
|
|
/* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */
|
2020-05-28 15:00:02 +00:00
|
|
|
|
|
|
|
#define EXT4_DAX_FL 0x02000000 /* Inode is DAX */
|
|
|
|
|
2012-12-10 19:04:46 +00:00
|
|
|
#define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
|
2015-10-17 20:15:18 +00:00
|
|
|
#define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
|
2020-05-10 21:52:52 +00:00
|
|
|
#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded directory */
|
2008-01-29 04:58:27 +00:00
|
|
|
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2020-07-13 03:10:12 +00:00
|
|
|
/* User modifiable flags */
|
|
|
|
#define EXT4_FL_USER_MODIFIABLE (EXT4_SECRM_FL | \
|
|
|
|
EXT4_UNRM_FL | \
|
|
|
|
EXT4_COMPR_FL | \
|
|
|
|
EXT4_SYNC_FL | \
|
|
|
|
EXT4_IMMUTABLE_FL | \
|
|
|
|
EXT4_APPEND_FL | \
|
|
|
|
EXT4_NODUMP_FL | \
|
|
|
|
EXT4_NOATIME_FL | \
|
|
|
|
EXT4_JOURNAL_DATA_FL | \
|
|
|
|
EXT4_NOTAIL_FL | \
|
|
|
|
EXT4_DIRSYNC_FL | \
|
|
|
|
EXT4_TOPDIR_FL | \
|
|
|
|
EXT4_EXTENTS_FL | \
|
|
|
|
0x00400000 /* EXT4_EOFBLOCKS_FL */ | \
|
|
|
|
EXT4_DAX_FL | \
|
|
|
|
EXT4_PROJINHERIT_FL | \
|
|
|
|
EXT4_CASEFOLD_FL)
|
|
|
|
|
|
|
|
/* User visible flags */
|
|
|
|
#define EXT4_FL_USER_VISIBLE (EXT4_FL_USER_MODIFIABLE | \
|
|
|
|
EXT4_DIRTY_FL | \
|
|
|
|
EXT4_COMPRBLK_FL | \
|
|
|
|
EXT4_NOCOMPR_FL | \
|
|
|
|
EXT4_ENCRYPT_FL | \
|
|
|
|
EXT4_INDEX_FL | \
|
|
|
|
EXT4_VERITY_FL | \
|
|
|
|
EXT4_INLINE_DATA_FL)
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2009-02-15 23:57:26 +00:00
|
|
|
/* Flags that should be inherited by new inodes from their parent. */
|
|
|
|
#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
|
2011-08-31 15:54:51 +00:00
|
|
|
EXT4_SYNC_FL | EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
|
2009-02-15 23:57:26 +00:00
|
|
|
EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
|
2016-01-08 21:01:21 +00:00
|
|
|
EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL |\
|
2020-05-28 15:00:02 +00:00
|
|
|
EXT4_PROJINHERIT_FL | EXT4_CASEFOLD_FL |\
|
|
|
|
EXT4_DAX_FL)
|
2009-02-15 23:57:26 +00:00
|
|
|
|
2009-02-15 23:09:20 +00:00
|
|
|
/* Flags that are appropriate for regular files (all but dir-specific ones). */
|
2019-06-10 04:13:32 +00:00
|
|
|
#define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL | EXT4_CASEFOLD_FL |\
|
|
|
|
EXT4_PROJINHERIT_FL))
|
2009-02-15 23:09:20 +00:00
|
|
|
|
|
|
|
/* Flags that are appropriate for non-directories/regular files. */
|
|
|
|
#define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
|
|
|
|
|
2019-02-11 05:35:06 +00:00
|
|
|
/* The only flags that should be swapped */
|
|
|
|
#define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL)
|
|
|
|
|
2020-05-28 15:00:02 +00:00
|
|
|
/* Flags which are mutually exclusive to DAX */
|
|
|
|
#define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\
|
2020-08-28 08:43:30 +00:00
|
|
|
EXT4_JOURNAL_DATA_FL | EXT4_INLINE_DATA_FL)
|
2020-05-28 15:00:02 +00:00
|
|
|
|
2009-02-15 23:09:20 +00:00
|
|
|
/* Mask out flags that are inappropriate for the given type of inode. */
|
|
|
|
static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
|
|
|
|
{
|
|
|
|
if (S_ISDIR(mode))
|
|
|
|
return flags;
|
|
|
|
else if (S_ISREG(mode))
|
|
|
|
return flags & EXT4_REG_FLMASK;
|
|
|
|
else
|
|
|
|
return flags & EXT4_OTHER_FLMASK;
|
|
|
|
}
|
|
|
|
|
2010-05-17 02:00:00 +00:00
|
|
|
/*
|
|
|
|
* Inode flags used for atomic set/get
|
|
|
|
*/
|
|
|
|
enum {
|
|
|
|
EXT4_INODE_SECRM = 0, /* Secure deletion */
|
|
|
|
EXT4_INODE_UNRM = 1, /* Undelete */
|
|
|
|
EXT4_INODE_COMPR = 2, /* Compress file */
|
|
|
|
EXT4_INODE_SYNC = 3, /* Synchronous updates */
|
|
|
|
EXT4_INODE_IMMUTABLE = 4, /* Immutable file */
|
|
|
|
EXT4_INODE_APPEND = 5, /* writes to file may only append */
|
|
|
|
EXT4_INODE_NODUMP = 6, /* do not dump file */
|
|
|
|
EXT4_INODE_NOATIME = 7, /* do not update atime */
|
|
|
|
/* Reserved for compression usage... */
|
|
|
|
EXT4_INODE_DIRTY = 8,
|
|
|
|
EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
|
|
|
|
EXT4_INODE_NOCOMPR = 10, /* Don't compress */
|
2015-04-11 11:44:12 +00:00
|
|
|
EXT4_INODE_ENCRYPT = 11, /* Encrypted file */
|
2010-05-17 02:00:00 +00:00
|
|
|
/* End compression flags --- maybe not all used */
|
|
|
|
EXT4_INODE_INDEX = 12, /* hash-indexed directory */
|
|
|
|
EXT4_INODE_IMAGIC = 13, /* AFS directory */
|
|
|
|
EXT4_INODE_JOURNAL_DATA = 14, /* file data should be journaled */
|
|
|
|
EXT4_INODE_NOTAIL = 15, /* file tail should not be merged */
|
|
|
|
EXT4_INODE_DIRSYNC = 16, /* dirsync behaviour (directories only) */
|
|
|
|
EXT4_INODE_TOPDIR = 17, /* Top of directory hierarchies*/
|
|
|
|
EXT4_INODE_HUGE_FILE = 18, /* Set to each huge file */
|
|
|
|
EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
|
2019-07-22 16:26:24 +00:00
|
|
|
EXT4_INODE_VERITY = 20, /* Verity protected inode */
|
2010-05-17 02:00:00 +00:00
|
|
|
EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
|
ext4: remove EXT4_EOFBLOCKS_FL and associated code
The EXT4_EOFBLOCKS_FL inode flag is used to indicate whether a file
contains unwritten blocks past i_size. It's set when ext4_fallocate
is called with the KEEP_SIZE flag to extend a file with an unwritten
extent. However, this flag hasn't been useful functionally since
March, 2012, when a decision was made to remove it from ext4.
All traces of EXT4_EOFBLOCKS_FL were removed from e2fsprogs version
1.42.2 by commit 010dc7b90d97 ("e2fsck: remove EXT4_EOFBLOCKS_FL flag
handling") at that time. Now that enough time has passed to make
e2fsprogs versions containing this modification common, this patch now
removes the code associated with EXT4_EOFBLOCKS_FL from the kernel as
well.
This change has two implications. First, because pre-1.42.2 e2fsck
versions only look for a problem if EXT4_EOFBLOCKS_FL is set, and
because that bit will never be set by newer kernels containing this
patch, old versions of e2fsck won't have a compatibility problem with
files created by newer kernels.
Second, newer kernels will not clear EXT4_EOFBLOCKS_FL inode flag bits
belonging to a file written by an older kernel. If set, it will remain
in that state until the file is deleted. Because e2fsck versions since
1.42.2 don't check the flag at all, no adverse effect is expected.
However, pre-1.42.2 e2fsck versions that do check the flag may report
that it is set when it ought not to be after a file has been truncated
or had its unwritten blocks written. In this case, the old version of
e2fsck will offer to clear the flag. No adverse effect would then
occur whether the user chooses to clear the flag or not.
Signed-off-by: Eric Whitney <enwlinux@gmail.com>
Link: https://lore.kernel.org/r/20200211210216.24960-1-enwlinux@gmail.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2020-02-11 21:02:16 +00:00
|
|
|
/* 22 was formerly EXT4_INODE_EOFBLOCKS */
|
2020-05-28 15:00:02 +00:00
|
|
|
EXT4_INODE_DAX = 25, /* Inode is DAX */
|
2012-12-10 19:04:46 +00:00
|
|
|
EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */
|
2015-10-17 20:15:18 +00:00
|
|
|
EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */
|
2020-05-10 21:52:52 +00:00
|
|
|
EXT4_INODE_CASEFOLD = 30, /* Casefolded directory */
|
2010-05-17 02:00:00 +00:00
|
|
|
EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
ext4: ensure Inode flags consistency are checked at build time
Flags being used by atomic operations in inode flags (e.g.
ext4_test_inode_flag(), should be consistent with that actually stored
in inodes, i.e.: EXT4_XXX_FL.
It ensures that this consistency is checked at build-time, not at
run-time.
Currently, the flags consistency are being checked at run-time, but,
there is no real reason to not do a build-time check instead of a
run-time check. The code is comparing macro defined values with enum
type variables, where both are constants, so, there is no problem in
comparing constants at build-time.
enum variables are treated as constants by the C compiler, according
to the C99 specs (see www.open-std.org/jtc1/sc22/wg14/www/docs/n1124.pdf
sec. 6.2.5, item 16), so, there is no real problem in comparing an
enumeration type at build time
Signed-off-by: Carlos Maiolino <cmaiolino@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2012-12-10 21:30:45 +00:00
|
|
|
* Since it's pretty easy to mix up bit numbers and hex values, we use a
|
|
|
|
* build-time check to make sure that EXT4_XXX_FL is consistent with respect to
|
|
|
|
* EXT4_INODE_XXX. If all is well, the macros will be dropped, so, it won't cost
|
|
|
|
* any extra space in the compiled kernel image, otherwise, the build will fail.
|
|
|
|
* It's important that these values are the same, since we are using
|
|
|
|
* EXT4_INODE_XXX to test for flag values, but EXT4_XXX_FL must be consistent
|
|
|
|
* with the values of FS_XXX_FL defined in include/linux/fs.h and the on-disk
|
|
|
|
* values found in ext2, ext3 and ext4 filesystems, and of course the values
|
|
|
|
* defined in e2fsprogs.
|
2010-05-17 02:00:00 +00:00
|
|
|
*
|
|
|
|
* It's not paranoia if the Murphy's Law really *is* out to get you. :-)
|
|
|
|
*/
|
2022-10-31 05:58:33 +00:00
|
|
|
#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1U << EXT4_INODE_##FLAG))
|
ext4: ensure Inode flags consistency are checked at build time
Flags being used by atomic operations in inode flags (e.g.
ext4_test_inode_flag(), should be consistent with that actually stored
in inodes, i.e.: EXT4_XXX_FL.
It ensures that this consistency is checked at build-time, not at
run-time.
Currently, the flags consistency are being checked at run-time, but,
there is no real reason to not do a build-time check instead of a
run-time check. The code is comparing macro defined values with enum
type variables, where both are constants, so, there is no problem in
comparing constants at build-time.
enum variables are treated as constants by the C compiler, according
to the C99 specs (see www.open-std.org/jtc1/sc22/wg14/www/docs/n1124.pdf
sec. 6.2.5, item 16), so, there is no real problem in comparing an
enumeration type at build time
Signed-off-by: Carlos Maiolino <cmaiolino@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2012-12-10 21:30:45 +00:00
|
|
|
#define CHECK_FLAG_VALUE(FLAG) BUILD_BUG_ON(!TEST_FLAG_VALUE(FLAG))
|
|
|
|
|
2010-05-17 02:00:00 +00:00
|
|
|
static inline void ext4_check_flag_values(void)
|
|
|
|
{
|
|
|
|
CHECK_FLAG_VALUE(SECRM);
|
|
|
|
CHECK_FLAG_VALUE(UNRM);
|
|
|
|
CHECK_FLAG_VALUE(COMPR);
|
|
|
|
CHECK_FLAG_VALUE(SYNC);
|
|
|
|
CHECK_FLAG_VALUE(IMMUTABLE);
|
|
|
|
CHECK_FLAG_VALUE(APPEND);
|
|
|
|
CHECK_FLAG_VALUE(NODUMP);
|
|
|
|
CHECK_FLAG_VALUE(NOATIME);
|
|
|
|
CHECK_FLAG_VALUE(DIRTY);
|
|
|
|
CHECK_FLAG_VALUE(COMPRBLK);
|
|
|
|
CHECK_FLAG_VALUE(NOCOMPR);
|
2015-01-19 21:00:58 +00:00
|
|
|
CHECK_FLAG_VALUE(ENCRYPT);
|
2010-05-17 02:00:00 +00:00
|
|
|
CHECK_FLAG_VALUE(INDEX);
|
|
|
|
CHECK_FLAG_VALUE(IMAGIC);
|
|
|
|
CHECK_FLAG_VALUE(JOURNAL_DATA);
|
|
|
|
CHECK_FLAG_VALUE(NOTAIL);
|
|
|
|
CHECK_FLAG_VALUE(DIRSYNC);
|
|
|
|
CHECK_FLAG_VALUE(TOPDIR);
|
|
|
|
CHECK_FLAG_VALUE(HUGE_FILE);
|
|
|
|
CHECK_FLAG_VALUE(EXTENTS);
|
2019-07-22 16:26:24 +00:00
|
|
|
CHECK_FLAG_VALUE(VERITY);
|
2010-05-17 02:00:00 +00:00
|
|
|
CHECK_FLAG_VALUE(EA_INODE);
|
2012-12-10 19:04:46 +00:00
|
|
|
CHECK_FLAG_VALUE(INLINE_DATA);
|
2015-10-17 20:15:18 +00:00
|
|
|
CHECK_FLAG_VALUE(PROJINHERIT);
|
2020-05-10 21:52:52 +00:00
|
|
|
CHECK_FLAG_VALUE(CASEFOLD);
|
2010-05-17 02:00:00 +00:00
|
|
|
CHECK_FLAG_VALUE(RESERVED);
|
|
|
|
}
|
|
|
|
|
2010-05-17 10:00:00 +00:00
|
|
|
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
|
|
|
|
struct compat_ext4_new_group_input {
|
|
|
|
u32 group;
|
|
|
|
compat_u64 block_bitmap;
|
|
|
|
compat_u64 inode_bitmap;
|
|
|
|
compat_u64 inode_table;
|
|
|
|
u32 blocks_count;
|
|
|
|
u16 reserved_blocks;
|
|
|
|
u16 unused;
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
/* The struct ext4_new_group_input in kernel space, with free_blocks_count */
|
|
|
|
struct ext4_new_group_data {
|
2006-10-11 08:20:50 +00:00
|
|
|
__u32 group;
|
2006-10-11 08:21:10 +00:00
|
|
|
__u64 block_bitmap;
|
|
|
|
__u64 inode_bitmap;
|
|
|
|
__u64 inode_table;
|
2006-10-11 08:20:50 +00:00
|
|
|
__u32 blocks_count;
|
|
|
|
__u16 reserved_blocks;
|
2017-10-29 13:38:46 +00:00
|
|
|
__u16 mdata_blocks;
|
|
|
|
__u32 free_clusters_count;
|
2006-10-11 08:20:50 +00:00
|
|
|
};
|
|
|
|
|
2012-01-04 04:32:52 +00:00
|
|
|
/* Indexes used to index group tables in ext4_new_group_data */
|
|
|
|
enum {
|
|
|
|
BLOCK_BITMAP = 0, /* block bitmap */
|
|
|
|
INODE_BITMAP, /* inode bitmap */
|
|
|
|
INODE_TABLE, /* inode tables */
|
|
|
|
GROUP_TABLE_COUNT,
|
|
|
|
};
|
|
|
|
|
2007-07-18 01:42:41 +00:00
|
|
|
/*
|
2010-07-27 15:56:07 +00:00
|
|
|
* Flags used by ext4_map_blocks()
|
2007-07-18 01:42:41 +00:00
|
|
|
*/
|
2014-04-21 03:45:47 +00:00
|
|
|
/* Allocate any needed blocks and/or convert an unwritten
|
2009-05-14 04:58:52 +00:00
|
|
|
extent to be an initialized ext4 */
|
2009-05-14 17:57:08 +00:00
|
|
|
#define EXT4_GET_BLOCKS_CREATE 0x0001
|
2014-04-21 03:45:47 +00:00
|
|
|
/* Request the creation of an unwritten extent */
|
|
|
|
#define EXT4_GET_BLOCKS_UNWRIT_EXT 0x0002
|
|
|
|
#define EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT (EXT4_GET_BLOCKS_UNWRIT_EXT|\
|
2009-05-14 04:58:52 +00:00
|
|
|
EXT4_GET_BLOCKS_CREATE)
|
2013-04-10 02:11:22 +00:00
|
|
|
/* Caller is from the delayed allocation writeout path
|
|
|
|
* finally doing the actual allocation of delayed blocks */
|
2009-06-09 04:17:05 +00:00
|
|
|
#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
|
2009-09-28 19:49:08 +00:00
|
|
|
/* caller is from the direct IO path, request to creation of an
|
2014-04-21 03:45:47 +00:00
|
|
|
unwritten extents if not allocated, split the unwritten
|
2009-09-28 19:49:08 +00:00
|
|
|
extent if blocks has been preallocated already*/
|
2010-03-02 18:28:44 +00:00
|
|
|
#define EXT4_GET_BLOCKS_PRE_IO 0x0008
|
2010-01-15 06:27:59 +00:00
|
|
|
#define EXT4_GET_BLOCKS_CONVERT 0x0010
|
2010-03-02 18:28:44 +00:00
|
|
|
#define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\
|
2014-04-21 03:45:47 +00:00
|
|
|
EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
|
2010-03-02 18:28:44 +00:00
|
|
|
/* Convert extent to initialized after IO complete */
|
|
|
|
#define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
|
2014-04-21 03:45:47 +00:00
|
|
|
EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
|
2013-04-10 02:11:22 +00:00
|
|
|
/* Eventual metadata allocation (due to growing extent tree)
|
|
|
|
* should not fail, so try to use reserved blocks for that.*/
|
|
|
|
#define EXT4_GET_BLOCKS_METADATA_NOFAIL 0x0020
|
2011-05-25 11:41:54 +00:00
|
|
|
/* Don't normalize allocation size (used for fallocate) */
|
|
|
|
#define EXT4_GET_BLOCKS_NO_NORMALIZE 0x0040
|
2014-03-18 22:05:35 +00:00
|
|
|
/* Convert written extents to unwritten */
|
2015-12-07 20:04:57 +00:00
|
|
|
#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0100
|
2015-12-07 20:10:26 +00:00
|
|
|
/* Write zeros to newly created written extents */
|
|
|
|
#define EXT4_GET_BLOCKS_ZERO 0x0200
|
|
|
|
#define EXT4_GET_BLOCKS_CREATE_ZERO (EXT4_GET_BLOCKS_CREATE |\
|
|
|
|
EXT4_GET_BLOCKS_ZERO)
|
2016-04-24 04:56:08 +00:00
|
|
|
/* Caller will submit data before dropping transaction handle. This
|
|
|
|
* allows jbd2 to avoid submitting data before commit. */
|
|
|
|
#define EXT4_GET_BLOCKS_IO_SUBMIT 0x0400
|
2022-04-24 14:09:35 +00:00
|
|
|
/* Caller is in the atomic contex, find extent if it has been cached */
|
|
|
|
#define EXT4_GET_BLOCKS_CACHED_NOWAIT 0x0800
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2013-08-17 01:23:41 +00:00
|
|
|
/*
|
2013-08-17 02:05:14 +00:00
|
|
|
* The bit position of these flags must not overlap with any of the
|
2014-09-01 18:43:09 +00:00
|
|
|
* EXT4_GET_BLOCKS_*. They are used by ext4_find_extent(),
|
2013-08-17 01:23:41 +00:00
|
|
|
* read_extent_tree_block(), ext4_split_extent_at(),
|
2013-08-17 02:05:14 +00:00
|
|
|
* ext4_ext_insert_extent(), and ext4_ext_create_new_leaf().
|
|
|
|
* EXT4_EX_NOCACHE is used to indicate that the we shouldn't be
|
|
|
|
* caching the extents when reading from the extent tree while a
|
|
|
|
* truncate or punch hole operation is in progress.
|
2013-08-17 01:23:41 +00:00
|
|
|
*/
|
2014-09-04 22:09:29 +00:00
|
|
|
#define EXT4_EX_NOCACHE 0x40000000
|
|
|
|
#define EXT4_EX_FORCE_CACHE 0x20000000
|
2020-05-07 17:50:28 +00:00
|
|
|
#define EXT4_EX_NOFAIL 0x10000000
|
2013-08-17 01:23:41 +00:00
|
|
|
|
2009-11-23 12:17:05 +00:00
|
|
|
/*
|
|
|
|
* Flags used by ext4_free_blocks
|
|
|
|
*/
|
2018-01-08 04:36:19 +00:00
|
|
|
#define EXT4_FREE_BLOCKS_METADATA 0x0001
|
|
|
|
#define EXT4_FREE_BLOCKS_FORGET 0x0002
|
|
|
|
#define EXT4_FREE_BLOCKS_VALIDATED 0x0004
|
|
|
|
#define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
|
2011-09-09 22:50:51 +00:00
|
|
|
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
|
|
|
|
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
|
2018-10-01 18:25:08 +00:00
|
|
|
#define EXT4_FREE_BLOCKS_RERESERVE_CLUSTER 0x0040
|
2009-11-23 12:17:05 +00:00
|
|
|
|
2010-05-17 09:00:00 +00:00
|
|
|
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* ioctl commands in 32 bit emulation
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_IOC32_GETVERSION _IOR('f', 3, int)
|
|
|
|
#define EXT4_IOC32_SETVERSION _IOW('f', 4, int)
|
|
|
|
#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
|
|
|
|
#define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int)
|
|
|
|
#define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
|
2010-05-17 10:00:00 +00:00
|
|
|
#define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input)
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
|
|
|
|
#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
|
2010-05-17 09:00:00 +00:00
|
|
|
#endif
|
2006-10-11 08:20:50 +00:00
|
|
|
|
tree-wide: fix comment/printk typos
"gadget", "through", "command", "maintain", "maintain", "controller", "address",
"between", "initiali[zs]e", "instead", "function", "select", "already",
"equal", "access", "management", "hierarchy", "registration", "interest",
"relative", "memory", "offset", "already",
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
2010-11-01 19:38:34 +00:00
|
|
|
/* Max physical block we can address w/o extents */
|
ext4: limit block allocations for indirect-block files to < 2^32
Today, the ext4 allocator will happily allocate blocks past
2^32 for indirect-block files, which results in the block
numbers getting truncated, and corruption ensues.
This patch limits such allocations to < 2^32, and adds
BUG_ONs if we do get blocks larger than that.
This should address RH Bug 519471, ext4 bitmap allocator
must limit blocks to < 2^32
* ext4_find_goal() is modified to choose a goal < UINT_MAX,
so that our starting point is in an acceptable range.
* ext4_xattr_block_set() is modified such that the goal block
is < UINT_MAX, as above.
* ext4_mb_regular_allocator() is modified so that the group
search does not continue into groups which are too high
* ext4_mb_use_preallocated() has a check that we don't use
preallocated space which is too far out
* ext4_alloc_blocks() and ext4_xattr_block_set() add some BUG_ONs
No attempt has been made to limit inode locations to < 2^32,
so we may wind up with blocks far from their inodes. Doing
this much already will lead to some odd ENOSPC issues when the
"lower 32" gets full, and further restricting inodes could
make that even weirder.
For high inodes, choosing a goal of the original, % UINT_MAX,
may be a bit odd, but then we're in an odd situation anyway,
and I don't know of a better heuristic.
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2009-09-16 18:45:10 +00:00
|
|
|
#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
|
|
|
|
|
2018-09-01 16:45:04 +00:00
|
|
|
/* Max logical block we can support */
|
2020-05-05 15:43:14 +00:00
|
|
|
#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFE
|
2018-09-01 16:45:04 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Structure of an inode on the disk
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
struct ext4_inode {
|
2006-10-11 08:20:50 +00:00
|
|
|
__le16 i_mode; /* File mode */
|
|
|
|
__le16 i_uid; /* Low 16 bits of Owner Uid */
|
2008-01-29 04:58:27 +00:00
|
|
|
__le32 i_size_lo; /* Size in bytes */
|
2006-10-11 08:20:50 +00:00
|
|
|
__le32 i_atime; /* Access time */
|
2007-07-18 13:15:20 +00:00
|
|
|
__le32 i_ctime; /* Inode Change time */
|
2006-10-11 08:20:50 +00:00
|
|
|
__le32 i_mtime; /* Modification time */
|
|
|
|
__le32 i_dtime; /* Deletion Time */
|
|
|
|
__le16 i_gid; /* Low 16 bits of Group Id */
|
|
|
|
__le16 i_links_count; /* Links count */
|
2008-01-29 04:58:26 +00:00
|
|
|
__le32 i_blocks_lo; /* Blocks count */
|
2006-10-11 08:20:50 +00:00
|
|
|
__le32 i_flags; /* File flags */
|
|
|
|
union {
|
|
|
|
struct {
|
2008-01-29 04:58:27 +00:00
|
|
|
__le32 l_i_version;
|
2006-10-11 08:20:50 +00:00
|
|
|
} linux1;
|
|
|
|
struct {
|
|
|
|
__u32 h_i_translator;
|
|
|
|
} hurd1;
|
|
|
|
struct {
|
|
|
|
__u32 m_i_reserved1;
|
|
|
|
} masix1;
|
|
|
|
} osd1; /* OS dependent 1 */
|
2006-10-11 08:20:53 +00:00
|
|
|
__le32 i_block[EXT4_N_BLOCKS];/* Pointers to blocks */
|
2006-10-11 08:20:50 +00:00
|
|
|
__le32 i_generation; /* File version (for NFS) */
|
2008-01-29 04:58:27 +00:00
|
|
|
__le32 i_file_acl_lo; /* File ACL */
|
2008-01-29 04:58:27 +00:00
|
|
|
__le32 i_size_high;
|
2007-10-16 22:38:25 +00:00
|
|
|
__le32 i_obso_faddr; /* Obsoleted fragment address */
|
2006-10-11 08:20:50 +00:00
|
|
|
union {
|
|
|
|
struct {
|
2008-01-29 04:58:26 +00:00
|
|
|
__le16 l_i_blocks_high; /* were l_i_reserved1 */
|
2006-10-11 08:21:09 +00:00
|
|
|
__le16 l_i_file_acl_high;
|
2007-05-24 17:04:54 +00:00
|
|
|
__le16 l_i_uid_high; /* these 2 fields */
|
2006-10-11 08:20:50 +00:00
|
|
|
__le16 l_i_gid_high; /* were reserved2[0] */
|
2012-04-29 22:23:10 +00:00
|
|
|
__le16 l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */
|
|
|
|
__le16 l_i_reserved;
|
2006-10-11 08:20:50 +00:00
|
|
|
} linux2;
|
|
|
|
struct {
|
2007-10-16 22:38:25 +00:00
|
|
|
__le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */
|
2006-10-11 08:20:50 +00:00
|
|
|
__u16 h_i_mode_high;
|
|
|
|
__u16 h_i_uid_high;
|
|
|
|
__u16 h_i_gid_high;
|
|
|
|
__u32 h_i_author;
|
|
|
|
} hurd2;
|
|
|
|
struct {
|
2007-10-16 22:38:25 +00:00
|
|
|
__le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */
|
2006-10-11 08:21:09 +00:00
|
|
|
__le16 m_i_file_acl_high;
|
2006-10-11 08:20:50 +00:00
|
|
|
__u32 m_i_reserved2[2];
|
|
|
|
} masix2;
|
|
|
|
} osd2; /* OS dependent 2 */
|
|
|
|
__le16 i_extra_isize;
|
2012-04-29 22:23:10 +00:00
|
|
|
__le16 i_checksum_hi; /* crc32c(uuid+inum+inode) BE */
|
2007-07-18 13:15:20 +00:00
|
|
|
__le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */
|
|
|
|
__le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */
|
|
|
|
__le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */
|
|
|
|
__le32 i_crtime; /* File Creation time */
|
|
|
|
__le32 i_crtime_extra; /* extra FileCreationtime (nsec << 2 | epoch) */
|
2008-01-29 04:58:27 +00:00
|
|
|
__le32 i_version_hi; /* high 32 bits for 64-bit version */
|
2015-10-17 20:15:18 +00:00
|
|
|
__le32 i_projid; /* Project ID */
|
2006-10-11 08:20:50 +00:00
|
|
|
};
|
|
|
|
|
2007-07-18 13:15:20 +00:00
|
|
|
/*
 * Extra-timestamp layout: the low EXT4_EPOCH_BITS bits of each
 * {a,c,m,cr}time_extra field hold the high (epoch) bits of a 64-bit
 * tv_sec; the remaining bits hold the nanoseconds (see the encoding
 * table further below).
 */
#define EXT4_EPOCH_BITS 2
/* Mask selecting the two epoch bits of an _extra field. */
#define EXT4_EPOCH_MASK ((1 << EXT4_EPOCH_BITS) - 1)
/* Mask selecting the nanosecond bits of an _extra field. */
#define EXT4_NSEC_MASK  (~0UL << EXT4_EPOCH_BITS)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Extended fields will fit into an inode if the filesystem was formatted
|
|
|
|
* with large inodes (-I 256 or larger) and there are not currently any EAs
|
|
|
|
* consuming all of the available space. For new inodes we always reserve
|
|
|
|
* enough space for the kernel's known extended fields, but for inodes
|
|
|
|
* created with an old kernel this might not have been the case. None of
|
|
|
|
* the extended inode fields is critical for correct filesystem operation.
|
|
|
|
* This macro checks if a certain field fits in the inode. Note that
|
|
|
|
* inode-size = GOOD_OLD_INODE_SIZE + i_extra_isize
|
|
|
|
*/
|
|
|
|
/*
 * True when @field of the on-disk inode lies entirely within this
 * inode's i_extra_isize area (see the explanatory comment above).
 */
#define EXT4_FITS_IN_INODE(ext4_inode, einode, field)	\
	((offsetof(typeof(*ext4_inode), field) +	\
	  sizeof((ext4_inode)->field))			\
	 <= (EXT4_GOOD_OLD_INODE_SIZE +			\
	    (einode)->i_extra_isize))
|
|
|
|
|
ext4: Fix handling of extended tv_sec
In ext4, the bottom two bits of {a,c,m}time_extra are used to extend
the {a,c,m}time fields, deferring the year 2038 problem to the year
2446.
When decoding these extended fields, for times whose bottom 32 bits
would represent a negative number, sign extension causes the 64-bit
extended timestamp to be negative as well, which is not what's
intended. This patch corrects that issue, so that the only negative
{a,c,m}times are those between 1901 and 1970 (as per 32-bit signed
timestamps).
Some older kernels might have written pre-1970 dates with 1,1 in the
extra bits. This patch treats those incorrectly-encoded dates as
pre-1970, instead of post-2311, until kernel 4.20 is released.
Hopefully by then e2fsck will have fixed up the bad data.
Also add a comment explaining the encoding of ext4's extra {a,c,m}time
bits.
Signed-off-by: David Turner <novalis@novalis.org>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reported-by: Mark Harris <mh8928@yahoo.com>
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=23732
Cc: stable@vger.kernel.org
2015-11-24 19:34:37 +00:00
|
|
|
/*
|
|
|
|
* We use an encoding that preserves the times for extra epoch "00":
|
|
|
|
*
|
|
|
|
* extra msb of adjust for signed
|
|
|
|
* epoch 32-bit 32-bit tv_sec to
|
|
|
|
* bits time decoded 64-bit tv_sec 64-bit tv_sec valid time range
|
|
|
|
* 0 0 1 -0x80000000..-0x00000001 0x000000000 1901-12-13..1969-12-31
|
|
|
|
* 0 0 0 0x000000000..0x07fffffff 0x000000000 1970-01-01..2038-01-19
|
|
|
|
* 0 1 1 0x080000000..0x0ffffffff 0x100000000 2038-01-19..2106-02-07
|
|
|
|
* 0 1 0 0x100000000..0x17fffffff 0x100000000 2106-02-07..2174-02-25
|
|
|
|
* 1 0 1 0x180000000..0x1ffffffff 0x200000000 2174-02-25..2242-03-16
|
|
|
|
* 1 0 0 0x200000000..0x27fffffff 0x200000000 2242-03-16..2310-04-04
|
|
|
|
* 1 1 1 0x280000000..0x2ffffffff 0x300000000 2310-04-04..2378-04-22
|
|
|
|
* 1 1 0 0x300000000..0x37fffffff 0x300000000 2378-04-22..2446-05-10
|
|
|
|
*
|
|
|
|
* Note that previous versions of the kernel on 64-bit systems would
|
|
|
|
* incorrectly use extra epoch bits 1,1 for dates between 1901 and
|
|
|
|
* 1970. e2fsck will correct this, assuming that it is run on the
|
|
|
|
* affected filesystem before 2242.
|
|
|
|
*/
|
|
|
|
|
2023-07-05 19:01:07 +00:00
|
|
|
/*
 * Pack the extended part of a timestamp for the on-disk _extra field:
 * the two epoch bits (bits 32..33 of the 64-bit tv_sec) go in the low
 * EXT4_EPOCH_BITS bits, the nanoseconds in the bits above them.
 */
static inline __le32 ext4_encode_extra_time(struct timespec64 ts)
{
	/*
	 * (tv_sec - (s32)tv_sec) >> 32 extracts the bits above bit 31 in a
	 * way that keeps extra epoch "00" for times representable as a
	 * signed 32-bit value (see the encoding table above).
	 */
	u32 extra = ((ts.tv_sec - (s32)ts.tv_sec) >> 32) & EXT4_EPOCH_MASK;

	return cpu_to_le32(extra | (ts.tv_nsec << EXT4_EPOCH_BITS));
}
|
|
|
|
|
2023-07-05 19:01:07 +00:00
|
|
|
/*
 * Decode an on-disk timestamp (@base seconds + @extra epoch/nsec bits)
 * into a timespec64, inverting ext4_encode_extra_time().
 */
static inline struct timespec64 ext4_decode_extra_time(__le32 base,
						       __le32 extra)
{
	/* Sign-extend the 32-bit seconds; negative means pre-1970. */
	struct timespec64 ts = { .tv_sec = (signed)le32_to_cpu(base) };

	/*
	 * Non-zero epoch bits extend tv_sec past 2038: add them back as
	 * bits 32..33.  Combined with the sign extension above this
	 * yields exactly the ranges listed in the encoding table.
	 */
	if (unlikely(extra & cpu_to_le32(EXT4_EPOCH_MASK)))
		ts.tv_sec += (u64)(le32_to_cpu(extra) & EXT4_EPOCH_MASK) << 32;
	/* Nanoseconds occupy the bits above the two epoch bits. */
	ts.tv_nsec = (le32_to_cpu(extra) & EXT4_NSEC_MASK) >> EXT4_EPOCH_BITS;
	return ts;
}
|
|
|
|
|
2023-07-05 19:01:07 +00:00
|
|
|
/*
 * Store timespec64 @ts into the @xtime (and, when present, @xtime_extra)
 * fields of the raw on-disk inode.  If the inode is too small to carry
 * the _extra field, the seconds are clamped to the signed 32-bit range
 * so pre-1902/post-2038 times degrade gracefully instead of wrapping.
 */
#define EXT4_INODE_SET_XTIME_VAL(xtime, inode, raw_inode, ts)			\
do {										\
	if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) {	\
		(raw_inode)->xtime = cpu_to_le32((ts).tv_sec);			\
		(raw_inode)->xtime ## _extra = ext4_encode_extra_time(ts);	\
	} else	\
		(raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (ts).tv_sec, S32_MIN, S32_MAX)); \
} while (0)
|
|
|
|
|
2023-07-05 19:01:07 +00:00
|
|
|
/* Store the in-memory inode's own @xtime field into the raw inode. */
#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode)				\
	EXT4_INODE_SET_XTIME_VAL(xtime, inode, raw_inode, (inode)->xtime)
|
|
|
|
|
|
|
|
/* ctime lives inside the VFS inode; fetch it via inode_get_ctime(). */
#define EXT4_INODE_SET_CTIME(inode, raw_inode)					\
	EXT4_INODE_SET_XTIME_VAL(i_ctime, inode, raw_inode, inode_get_ctime(inode))
|
|
|
|
|
|
|
|
/*
 * Store an ext4_inode_info timestamp (e.g. i_crtime) into the raw
 * inode, but only if the on-disk inode is large enough to hold it.
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement: the previous bare-if expansion could capture an `else`
 * following the call site and silently change control flow.
 */
#define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode)			\
do {									\
	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime))		\
		EXT4_INODE_SET_XTIME_VAL(xtime, &((einode)->vfs_inode),	\
					 raw_inode, (einode)->xtime);	\
} while (0)
|
|
|
|
|
|
|
|
/*
 * Read one timestamp from the raw on-disk inode as a timespec64: use
 * the extended decoding when the inode carries the @xtime_extra field,
 * otherwise sign-extend the plain 32-bit seconds (nsec implicitly 0).
 */
#define EXT4_INODE_GET_XTIME_VAL(xtime, inode, raw_inode)			\
	(EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra) ?	\
		ext4_decode_extra_time((raw_inode)->xtime,			\
				       (raw_inode)->xtime ## _extra) :		\
		(struct timespec64) {						\
			.tv_sec = (signed)le32_to_cpu((raw_inode)->xtime)	\
		})
|
2007-07-18 13:15:20 +00:00
|
|
|
|
vfs: change inode times to use struct timespec64
struct timespec is not y2038 safe. Transition vfs to use
y2038 safe struct timespec64 instead.
The change was made with the help of the following cocinelle
script. This catches about 80% of the changes.
All the header file and logic changes are included in the
first 5 rules. The rest are trivial substitutions.
I avoid changing any of the function signatures or any other
filesystem specific data structures to keep the patch simple
for review.
The script can be a little shorter by combining different cases.
But, this version was sufficient for my usecase.
virtual patch
@ depends on patch @
identifier now;
@@
- struct timespec
+ struct timespec64
current_time ( ... )
{
- struct timespec now = current_kernel_time();
+ struct timespec64 now = current_kernel_time64();
...
- return timespec_trunc(
+ return timespec64_trunc(
... );
}
@ depends on patch @
identifier xtime;
@@
struct \( iattr \| inode \| kstat \) {
...
- struct timespec xtime;
+ struct timespec64 xtime;
...
}
@ depends on patch @
identifier t;
@@
struct inode_operations {
...
int (*update_time) (...,
- struct timespec t,
+ struct timespec64 t,
...);
...
}
@ depends on patch @
identifier t;
identifier fn_update_time =~ "update_time$";
@@
fn_update_time (...,
- struct timespec *t,
+ struct timespec64 *t,
...) { ... }
@ depends on patch @
identifier t;
@@
lease_get_mtime( ... ,
- struct timespec *t
+ struct timespec64 *t
) { ... }
@te depends on patch forall@
identifier ts;
local idexpression struct inode *inode_node;
identifier i_xtime =~ "^i_[acm]time$";
identifier ia_xtime =~ "^ia_[acm]time$";
identifier fn_update_time =~ "update_time$";
identifier fn;
expression e, E3;
local idexpression struct inode *node1;
local idexpression struct inode *node2;
local idexpression struct iattr *attr1;
local idexpression struct iattr *attr2;
local idexpression struct iattr attr;
identifier i_xtime1 =~ "^i_[acm]time$";
identifier i_xtime2 =~ "^i_[acm]time$";
identifier ia_xtime1 =~ "^ia_[acm]time$";
identifier ia_xtime2 =~ "^ia_[acm]time$";
@@
(
(
- struct timespec ts;
+ struct timespec64 ts;
|
- struct timespec ts = current_time(inode_node);
+ struct timespec64 ts = current_time(inode_node);
)
<+... when != ts
(
- timespec_equal(&inode_node->i_xtime, &ts)
+ timespec64_equal(&inode_node->i_xtime, &ts)
|
- timespec_equal(&ts, &inode_node->i_xtime)
+ timespec64_equal(&ts, &inode_node->i_xtime)
|
- timespec_compare(&inode_node->i_xtime, &ts)
+ timespec64_compare(&inode_node->i_xtime, &ts)
|
- timespec_compare(&ts, &inode_node->i_xtime)
+ timespec64_compare(&ts, &inode_node->i_xtime)
|
ts = current_time(e)
|
fn_update_time(..., &ts,...)
|
inode_node->i_xtime = ts
|
node1->i_xtime = ts
|
ts = inode_node->i_xtime
|
<+... attr1->ia_xtime ...+> = ts
|
ts = attr1->ia_xtime
|
ts.tv_sec
|
ts.tv_nsec
|
btrfs_set_stack_timespec_sec(..., ts.tv_sec)
|
btrfs_set_stack_timespec_nsec(..., ts.tv_nsec)
|
- ts = timespec64_to_timespec(
+ ts =
...
-)
|
- ts = ktime_to_timespec(
+ ts = ktime_to_timespec64(
...)
|
- ts = E3
+ ts = timespec_to_timespec64(E3)
|
- ktime_get_real_ts(&ts)
+ ktime_get_real_ts64(&ts)
|
fn(...,
- ts
+ timespec64_to_timespec(ts)
,...)
)
...+>
(
<... when != ts
- return ts;
+ return timespec64_to_timespec(ts);
...>
)
|
- timespec_equal(&node1->i_xtime1, &node2->i_xtime2)
+ timespec64_equal(&node1->i_xtime2, &node2->i_xtime2)
|
- timespec_equal(&node1->i_xtime1, &attr2->ia_xtime2)
+ timespec64_equal(&node1->i_xtime2, &attr2->ia_xtime2)
|
- timespec_compare(&node1->i_xtime1, &node2->i_xtime2)
+ timespec64_compare(&node1->i_xtime1, &node2->i_xtime2)
|
node1->i_xtime1 =
- timespec_trunc(attr1->ia_xtime1,
+ timespec64_trunc(attr1->ia_xtime1,
...)
|
- attr1->ia_xtime1 = timespec_trunc(attr2->ia_xtime2,
+ attr1->ia_xtime1 = timespec64_trunc(attr2->ia_xtime2,
...)
|
- ktime_get_real_ts(&attr1->ia_xtime1)
+ ktime_get_real_ts64(&attr1->ia_xtime1)
|
- ktime_get_real_ts(&attr.ia_xtime1)
+ ktime_get_real_ts64(&attr.ia_xtime1)
)
@ depends on patch @
struct inode *node;
struct iattr *attr;
identifier fn;
identifier i_xtime =~ "^i_[acm]time$";
identifier ia_xtime =~ "^ia_[acm]time$";
expression e;
@@
(
- fn(node->i_xtime);
+ fn(timespec64_to_timespec(node->i_xtime));
|
fn(...,
- node->i_xtime);
+ timespec64_to_timespec(node->i_xtime));
|
- e = fn(attr->ia_xtime);
+ e = fn(timespec64_to_timespec(attr->ia_xtime));
)
@ depends on patch forall @
struct inode *node;
struct iattr *attr;
identifier i_xtime =~ "^i_[acm]time$";
identifier ia_xtime =~ "^ia_[acm]time$";
identifier fn;
@@
{
+ struct timespec ts;
<+...
(
+ ts = timespec64_to_timespec(node->i_xtime);
fn (...,
- &node->i_xtime,
+ &ts,
...);
|
+ ts = timespec64_to_timespec(attr->ia_xtime);
fn (...,
- &attr->ia_xtime,
+ &ts,
...);
)
...+>
}
@ depends on patch forall @
struct inode *node;
struct iattr *attr;
struct kstat *stat;
identifier ia_xtime =~ "^ia_[acm]time$";
identifier i_xtime =~ "^i_[acm]time$";
identifier xtime =~ "^[acm]time$";
identifier fn, ret;
@@
{
+ struct timespec ts;
<+...
(
+ ts = timespec64_to_timespec(node->i_xtime);
ret = fn (...,
- &node->i_xtime,
+ &ts,
...);
|
+ ts = timespec64_to_timespec(node->i_xtime);
ret = fn (...,
- &node->i_xtime);
+ &ts);
|
+ ts = timespec64_to_timespec(attr->ia_xtime);
ret = fn (...,
- &attr->ia_xtime,
+ &ts,
...);
|
+ ts = timespec64_to_timespec(attr->ia_xtime);
ret = fn (...,
- &attr->ia_xtime);
+ &ts);
|
+ ts = timespec64_to_timespec(stat->xtime);
ret = fn (...,
- &stat->xtime);
+ &ts);
)
...+>
}
@ depends on patch @
struct inode *node;
struct inode *node2;
identifier i_xtime1 =~ "^i_[acm]time$";
identifier i_xtime2 =~ "^i_[acm]time$";
identifier i_xtime3 =~ "^i_[acm]time$";
struct iattr *attrp;
struct iattr *attrp2;
struct iattr attr ;
identifier ia_xtime1 =~ "^ia_[acm]time$";
identifier ia_xtime2 =~ "^ia_[acm]time$";
struct kstat *stat;
struct kstat stat1;
struct timespec64 ts;
identifier xtime =~ "^[acmb]time$";
expression e;
@@
(
( node->i_xtime2 \| attrp->ia_xtime2 \| attr.ia_xtime2 \) = node->i_xtime1 ;
|
node->i_xtime2 = \( node2->i_xtime1 \| timespec64_trunc(...) \);
|
node->i_xtime2 = node->i_xtime1 = node->i_xtime3 = \(ts \| current_time(...) \);
|
node->i_xtime1 = node->i_xtime3 = \(ts \| current_time(...) \);
|
stat->xtime = node2->i_xtime1;
|
stat1.xtime = node2->i_xtime1;
|
( node->i_xtime2 \| attrp->ia_xtime2 \) = attrp->ia_xtime1 ;
|
( attrp->ia_xtime1 \| attr.ia_xtime1 \) = attrp2->ia_xtime2;
|
- e = node->i_xtime1;
+ e = timespec64_to_timespec( node->i_xtime1 );
|
- e = attrp->ia_xtime1;
+ e = timespec64_to_timespec( attrp->ia_xtime1 );
|
node->i_xtime1 = current_time(...);
|
node->i_xtime2 = node->i_xtime1 = node->i_xtime3 =
- e;
+ timespec_to_timespec64(e);
|
node->i_xtime1 = node->i_xtime3 =
- e;
+ timespec_to_timespec64(e);
|
- node->i_xtime1 = e;
+ node->i_xtime1 = timespec_to_timespec64(e);
)
Signed-off-by: Deepa Dinamani <deepa.kernel@gmail.com>
Cc: <anton@tuxera.com>
Cc: <balbi@kernel.org>
Cc: <bfields@fieldses.org>
Cc: <darrick.wong@oracle.com>
Cc: <dhowells@redhat.com>
Cc: <dsterba@suse.com>
Cc: <dwmw2@infradead.org>
Cc: <hch@lst.de>
Cc: <hirofumi@mail.parknet.co.jp>
Cc: <hubcap@omnibond.com>
Cc: <jack@suse.com>
Cc: <jaegeuk@kernel.org>
Cc: <jaharkes@cs.cmu.edu>
Cc: <jslaby@suse.com>
Cc: <keescook@chromium.org>
Cc: <mark@fasheh.com>
Cc: <miklos@szeredi.hu>
Cc: <nico@linaro.org>
Cc: <reiserfs-devel@vger.kernel.org>
Cc: <richard@nod.at>
Cc: <sage@redhat.com>
Cc: <sfrench@samba.org>
Cc: <swhiteho@redhat.com>
Cc: <tj@kernel.org>
Cc: <trond.myklebust@primarydata.com>
Cc: <tytso@mit.edu>
Cc: <viro@zeniv.linux.org.uk>
2018-05-09 02:36:02 +00:00
|
|
|
/* Load the raw inode's @xtime into the in-memory inode's own field. */
#define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode)				\
do {										\
	(inode)->xtime = EXT4_INODE_GET_XTIME_VAL(xtime, inode, raw_inode);	\
} while (0)
|
|
|
|
|
2023-07-05 19:01:07 +00:00
|
|
|
/* ctime is owned by the VFS inode; store via inode_set_ctime_to_ts(). */
#define EXT4_INODE_GET_CTIME(inode, raw_inode)					\
do {										\
	inode_set_ctime_to_ts(inode,						\
		EXT4_INODE_GET_XTIME_VAL(i_ctime, inode, raw_inode));		\
} while (0)
|
vfs: change inode times to use struct timespec64
struct timespec is not y2038 safe. Transition vfs to use
y2038 safe struct timespec64 instead.
The change was made with the help of the following cocinelle
script. This catches about 80% of the changes.
All the header file and logic changes are included in the
first 5 rules. The rest are trivial substitutions.
I avoid changing any of the function signatures or any other
filesystem specific data structures to keep the patch simple
for review.
The script can be a little shorter by combining different cases.
But, this version was sufficient for my usecase.
virtual patch
@ depends on patch @
identifier now;
@@
- struct timespec
+ struct timespec64
current_time ( ... )
{
- struct timespec now = current_kernel_time();
+ struct timespec64 now = current_kernel_time64();
...
- return timespec_trunc(
+ return timespec64_trunc(
... );
}
@ depends on patch @
identifier xtime;
@@
struct \( iattr \| inode \| kstat \) {
...
- struct timespec xtime;
+ struct timespec64 xtime;
...
}
@ depends on patch @
identifier t;
@@
struct inode_operations {
...
int (*update_time) (...,
- struct timespec t,
+ struct timespec64 t,
...);
...
}
@ depends on patch @
identifier t;
identifier fn_update_time =~ "update_time$";
@@
fn_update_time (...,
- struct timespec *t,
+ struct timespec64 *t,
...) { ... }
@ depends on patch @
identifier t;
@@
lease_get_mtime( ... ,
- struct timespec *t
+ struct timespec64 *t
) { ... }
@te depends on patch forall@
identifier ts;
local idexpression struct inode *inode_node;
identifier i_xtime =~ "^i_[acm]time$";
identifier ia_xtime =~ "^ia_[acm]time$";
identifier fn_update_time =~ "update_time$";
identifier fn;
expression e, E3;
local idexpression struct inode *node1;
local idexpression struct inode *node2;
local idexpression struct iattr *attr1;
local idexpression struct iattr *attr2;
local idexpression struct iattr attr;
identifier i_xtime1 =~ "^i_[acm]time$";
identifier i_xtime2 =~ "^i_[acm]time$";
identifier ia_xtime1 =~ "^ia_[acm]time$";
identifier ia_xtime2 =~ "^ia_[acm]time$";
@@
(
(
- struct timespec ts;
+ struct timespec64 ts;
|
- struct timespec ts = current_time(inode_node);
+ struct timespec64 ts = current_time(inode_node);
)
<+... when != ts
(
- timespec_equal(&inode_node->i_xtime, &ts)
+ timespec64_equal(&inode_node->i_xtime, &ts)
|
- timespec_equal(&ts, &inode_node->i_xtime)
+ timespec64_equal(&ts, &inode_node->i_xtime)
|
- timespec_compare(&inode_node->i_xtime, &ts)
+ timespec64_compare(&inode_node->i_xtime, &ts)
|
- timespec_compare(&ts, &inode_node->i_xtime)
+ timespec64_compare(&ts, &inode_node->i_xtime)
|
ts = current_time(e)
|
fn_update_time(..., &ts,...)
|
inode_node->i_xtime = ts
|
node1->i_xtime = ts
|
ts = inode_node->i_xtime
|
<+... attr1->ia_xtime ...+> = ts
|
ts = attr1->ia_xtime
|
ts.tv_sec
|
ts.tv_nsec
|
btrfs_set_stack_timespec_sec(..., ts.tv_sec)
|
btrfs_set_stack_timespec_nsec(..., ts.tv_nsec)
|
- ts = timespec64_to_timespec(
+ ts =
...
-)
|
- ts = ktime_to_timespec(
+ ts = ktime_to_timespec64(
...)
|
- ts = E3
+ ts = timespec_to_timespec64(E3)
|
- ktime_get_real_ts(&ts)
+ ktime_get_real_ts64(&ts)
|
fn(...,
- ts
+ timespec64_to_timespec(ts)
,...)
)
...+>
(
<... when != ts
- return ts;
+ return timespec64_to_timespec(ts);
...>
)
|
- timespec_equal(&node1->i_xtime1, &node2->i_xtime2)
+ timespec64_equal(&node1->i_xtime2, &node2->i_xtime2)
|
- timespec_equal(&node1->i_xtime1, &attr2->ia_xtime2)
+ timespec64_equal(&node1->i_xtime2, &attr2->ia_xtime2)
|
- timespec_compare(&node1->i_xtime1, &node2->i_xtime2)
+ timespec64_compare(&node1->i_xtime1, &node2->i_xtime2)
|
node1->i_xtime1 =
- timespec_trunc(attr1->ia_xtime1,
+ timespec64_trunc(attr1->ia_xtime1,
...)
|
- attr1->ia_xtime1 = timespec_trunc(attr2->ia_xtime2,
+ attr1->ia_xtime1 = timespec64_trunc(attr2->ia_xtime2,
...)
|
- ktime_get_real_ts(&attr1->ia_xtime1)
+ ktime_get_real_ts64(&attr1->ia_xtime1)
|
- ktime_get_real_ts(&attr.ia_xtime1)
+ ktime_get_real_ts64(&attr.ia_xtime1)
)
@ depends on patch @
struct inode *node;
struct iattr *attr;
identifier fn;
identifier i_xtime =~ "^i_[acm]time$";
identifier ia_xtime =~ "^ia_[acm]time$";
expression e;
@@
(
- fn(node->i_xtime);
+ fn(timespec64_to_timespec(node->i_xtime));
|
fn(...,
- node->i_xtime);
+ timespec64_to_timespec(node->i_xtime));
|
- e = fn(attr->ia_xtime);
+ e = fn(timespec64_to_timespec(attr->ia_xtime));
)
@ depends on patch forall @
struct inode *node;
struct iattr *attr;
identifier i_xtime =~ "^i_[acm]time$";
identifier ia_xtime =~ "^ia_[acm]time$";
identifier fn;
@@
{
+ struct timespec ts;
<+...
(
+ ts = timespec64_to_timespec(node->i_xtime);
fn (...,
- &node->i_xtime,
+ &ts,
...);
|
+ ts = timespec64_to_timespec(attr->ia_xtime);
fn (...,
- &attr->ia_xtime,
+ &ts,
...);
)
...+>
}
@ depends on patch forall @
struct inode *node;
struct iattr *attr;
struct kstat *stat;
identifier ia_xtime =~ "^ia_[acm]time$";
identifier i_xtime =~ "^i_[acm]time$";
identifier xtime =~ "^[acm]time$";
identifier fn, ret;
@@
{
+ struct timespec ts;
<+...
(
+ ts = timespec64_to_timespec(node->i_xtime);
ret = fn (...,
- &node->i_xtime,
+ &ts,
...);
|
+ ts = timespec64_to_timespec(node->i_xtime);
ret = fn (...,
- &node->i_xtime);
+ &ts);
|
+ ts = timespec64_to_timespec(attr->ia_xtime);
ret = fn (...,
- &attr->ia_xtime,
+ &ts,
...);
|
+ ts = timespec64_to_timespec(attr->ia_xtime);
ret = fn (...,
- &attr->ia_xtime);
+ &ts);
|
+ ts = timespec64_to_timespec(stat->xtime);
ret = fn (...,
- &stat->xtime);
+ &ts);
)
...+>
}
@ depends on patch @
struct inode *node;
struct inode *node2;
identifier i_xtime1 =~ "^i_[acm]time$";
identifier i_xtime2 =~ "^i_[acm]time$";
identifier i_xtime3 =~ "^i_[acm]time$";
struct iattr *attrp;
struct iattr *attrp2;
struct iattr attr ;
identifier ia_xtime1 =~ "^ia_[acm]time$";
identifier ia_xtime2 =~ "^ia_[acm]time$";
struct kstat *stat;
struct kstat stat1;
struct timespec64 ts;
identifier xtime =~ "^[acmb]time$";
expression e;
@@
(
( node->i_xtime2 \| attrp->ia_xtime2 \| attr.ia_xtime2 \) = node->i_xtime1 ;
|
node->i_xtime2 = \( node2->i_xtime1 \| timespec64_trunc(...) \);
|
node->i_xtime2 = node->i_xtime1 = node->i_xtime3 = \(ts \| current_time(...) \);
|
node->i_xtime1 = node->i_xtime3 = \(ts \| current_time(...) \);
|
stat->xtime = node2->i_xtime1;
|
stat1.xtime = node2->i_xtime1;
|
( node->i_xtime2 \| attrp->ia_xtime2 \) = attrp->ia_xtime1 ;
|
( attrp->ia_xtime1 \| attr.ia_xtime1 \) = attrp2->ia_xtime2;
|
- e = node->i_xtime1;
+ e = timespec64_to_timespec( node->i_xtime1 );
|
- e = attrp->ia_xtime1;
+ e = timespec64_to_timespec( attrp->ia_xtime1 );
|
node->i_xtime1 = current_time(...);
|
node->i_xtime2 = node->i_xtime1 = node->i_xtime3 =
- e;
+ timespec_to_timespec64(e);
|
node->i_xtime1 = node->i_xtime3 =
- e;
+ timespec_to_timespec64(e);
|
- node->i_xtime1 = e;
+ node->i_xtime1 = timespec_to_timespec64(e);
)
Signed-off-by: Deepa Dinamani <deepa.kernel@gmail.com>
Cc: <anton@tuxera.com>
Cc: <balbi@kernel.org>
Cc: <bfields@fieldses.org>
Cc: <darrick.wong@oracle.com>
Cc: <dhowells@redhat.com>
Cc: <dsterba@suse.com>
Cc: <dwmw2@infradead.org>
Cc: <hch@lst.de>
Cc: <hirofumi@mail.parknet.co.jp>
Cc: <hubcap@omnibond.com>
Cc: <jack@suse.com>
Cc: <jaegeuk@kernel.org>
Cc: <jaharkes@cs.cmu.edu>
Cc: <jslaby@suse.com>
Cc: <keescook@chromium.org>
Cc: <mark@fasheh.com>
Cc: <miklos@szeredi.hu>
Cc: <nico@linaro.org>
Cc: <reiserfs-devel@vger.kernel.org>
Cc: <richard@nod.at>
Cc: <sage@redhat.com>
Cc: <sfrench@samba.org>
Cc: <swhiteho@redhat.com>
Cc: <tj@kernel.org>
Cc: <trond.myklebust@primarydata.com>
Cc: <tytso@mit.edu>
Cc: <viro@zeniv.linux.org.uk>
2018-05-09 02:36:02 +00:00
|
|
|
|
2023-07-05 19:01:07 +00:00
|
|
|
/*
 * Load an ext4_inode_info timestamp (e.g. i_crtime) from the raw
 * inode; if the on-disk inode is too small to carry the field, fall
 * back to a zero timestamp rather than reading past i_extra_isize.
 */
#define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode)			\
do {									\
	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime))		\
		(einode)->xtime = \
			EXT4_INODE_GET_XTIME_VAL(xtime, &(einode->vfs_inode), \
						 raw_inode); \
	else \
		(einode)->xtime = (struct timespec64){0, 0};	\
} while (0)
|
|
|
|
|
2008-01-29 04:58:27 +00:00
|
|
|
/* Shorthand for the 64-bit inode version kept in the Linux osd1 union. */
#define i_disk_version osd1.linux1.l_i_version

/*
 * OS-dependent accessors: map generic i_* names onto the osd1/osd2
 * union members appropriate for the OS this header is compiled for.
 */
#if defined(__KERNEL__) || defined(__linux__)
/* NOTE(review): the linux1 struct above declares l_i_version, not
 * l_i_reserved1 -- this define looks stale; verify against the struct. */
#define i_reserved1	osd1.linux1.l_i_reserved1
#define i_file_acl_high	osd2.linux2.l_i_file_acl_high
#define i_blocks_high	osd2.linux2.l_i_blocks_high
#define i_uid_low	i_uid
#define i_gid_low	i_gid
#define i_uid_high	osd2.linux2.l_i_uid_high
#define i_gid_high	osd2.linux2.l_i_gid_high
#define i_checksum_lo	osd2.linux2.l_i_checksum_lo

#elif defined(__GNU__)

#define i_translator	osd1.hurd1.h_i_translator
#define i_uid_high	osd2.hurd2.h_i_uid_high
#define i_gid_high	osd2.hurd2.h_i_gid_high
#define i_author	osd2.hurd2.h_i_author

#elif defined(__masix__)

#define i_reserved1	osd1.masix1.m_i_reserved1
#define i_file_acl_high	osd2.masix2.m_i_file_acl_high
#define i_reserved2	osd2.masix2.m_i_reserved2

#endif /* defined(__KERNEL__) || defined(__linux__) */
|
|
|
|
|
2012-11-08 20:18:54 +00:00
|
|
|
#include "extents_status.h"
|
2020-10-15 20:37:55 +00:00
|
|
|
#include "fast_commit.h"
|
2012-11-08 20:18:54 +00:00
|
|
|
|
2016-04-01 05:31:28 +00:00
|
|
|
/*
|
|
|
|
* Lock subclasses for i_data_sem in the ext4_inode_info structure.
|
|
|
|
*
|
|
|
|
* These are needed to avoid lockdep false positives when we need to
|
|
|
|
* allocate blocks to the quota inode during ext4_map_blocks(), while
|
|
|
|
* holding i_data_sem for a normal (non-quota) inode. Since we don't
|
|
|
|
* do quota tracking for the quota inode, this avoids deadlock (as
|
|
|
|
* well as infinite recursion, since it isn't turtles all the way
|
|
|
|
* down...)
|
|
|
|
*
|
|
|
|
* I_DATA_SEM_NORMAL - Used for most inodes
|
|
|
|
* I_DATA_SEM_OTHER - Used by move_inode.c for the second normal inode
|
|
|
|
* where the second inode has larger inode number
|
|
|
|
* than the first
|
|
|
|
* I_DATA_SEM_QUOTA - Used for quota inodes only
|
2023-05-24 03:49:51 +00:00
|
|
|
* I_DATA_SEM_EA - Used for ea_inodes only
|
2016-04-01 05:31:28 +00:00
|
|
|
*/
|
|
|
|
enum {
|
|
|
|
I_DATA_SEM_NORMAL = 0,
|
|
|
|
I_DATA_SEM_OTHER,
|
|
|
|
I_DATA_SEM_QUOTA,
|
2023-05-24 03:49:51 +00:00
|
|
|
I_DATA_SEM_EA
|
2016-04-01 05:31:28 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
|
2009-05-01 17:44:33 +00:00
|
|
|
/*
|
|
|
|
* fourth extended file system inode data in memory
|
|
|
|
*/
|
|
|
|
struct ext4_inode_info {
|
|
|
|
__le32 i_data[15]; /* unconverted */
|
|
|
|
__u32 i_dtime;
|
2010-05-17 02:00:00 +00:00
|
|
|
ext4_fsblk_t i_file_acl;
|
2009-05-01 17:44:33 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* i_block_group is the number of the block group which contains
|
|
|
|
* this file's inode. Constant across the lifetime of the inode,
|
2017-07-31 02:30:11 +00:00
|
|
|
* it is used for making block allocation decisions - we try to
|
2009-05-01 17:44:33 +00:00
|
|
|
* place a file's data blocks near its inode block, and new inodes
|
|
|
|
* near to their parent directory's inode.
|
|
|
|
*/
|
|
|
|
ext4_group_t i_block_group;
|
2011-01-10 17:13:42 +00:00
|
|
|
ext4_lblk_t i_dir_start_lookup;
|
2011-01-10 17:18:25 +00:00
|
|
|
#if (BITS_PER_LONG < 64)
|
2010-01-24 19:34:07 +00:00
|
|
|
unsigned long i_state_flags; /* Dynamic state flags */
|
2011-01-10 17:18:25 +00:00
|
|
|
#endif
|
2010-05-17 02:00:00 +00:00
|
|
|
unsigned long i_flags;
|
2009-05-01 17:44:33 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Extended attributes can be read independently of the main file
|
2022-01-21 07:06:11 +00:00
|
|
|
* data. Taking i_rwsem even when reading would cause contention
|
2009-05-01 17:44:33 +00:00
|
|
|
* between readers of EAs and writers of regular file data, so
|
|
|
|
* instead we synchronize on xattr_sem when reading or changing
|
|
|
|
* EAs.
|
|
|
|
*/
|
|
|
|
struct rw_semaphore xattr_sem;
|
|
|
|
|
2021-08-16 09:57:06 +00:00
|
|
|
/*
|
|
|
|
* Inodes with EXT4_STATE_ORPHAN_FILE use i_orphan_idx. Otherwise
|
|
|
|
* i_orphan is used.
|
|
|
|
*/
|
|
|
|
union {
|
|
|
|
struct list_head i_orphan; /* unlinked but open inodes */
|
|
|
|
unsigned int i_orphan_idx; /* Index in orphan file */
|
|
|
|
};
|
2009-05-01 17:44:33 +00:00
|
|
|
|
2020-10-15 20:37:57 +00:00
|
|
|
/* Fast commit related info */
|
|
|
|
|
2022-02-21 07:56:15 +00:00
|
|
|
/* For tracking dentry create updates */
|
|
|
|
struct list_head i_fc_dilist;
|
2020-10-15 20:37:57 +00:00
|
|
|
struct list_head i_fc_list; /*
|
|
|
|
* inodes that need fast commit
|
|
|
|
* protected by sbi->s_fc_lock.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Start of lblk range that needs to be committed in this fast commit */
|
|
|
|
ext4_lblk_t i_fc_lblk_start;
|
|
|
|
|
|
|
|
/* End of lblk range that needs to be committed in this fast commit */
|
|
|
|
ext4_lblk_t i_fc_lblk_len;
|
|
|
|
|
|
|
|
/* Number of ongoing updates on this inode */
|
|
|
|
atomic_t i_fc_updates;
|
|
|
|
|
|
|
|
/* Fast commit wait queue for this inode */
|
|
|
|
wait_queue_head_t i_fc_wait;
|
|
|
|
|
|
|
|
/* Protect concurrent accesses on i_fc_lblk_start, i_fc_lblk_len */
|
|
|
|
struct mutex i_fc_lock;
|
|
|
|
|
2009-05-01 17:44:33 +00:00
|
|
|
/*
|
|
|
|
* i_disksize keeps track of what the inode size is ON DISK, not
|
|
|
|
* in memory. During truncate, i_size is set to the new size by
|
|
|
|
* the VFS prior to calling ext4_truncate(), but the filesystem won't
|
|
|
|
* set i_disksize to 0 until the truncate is actually under way.
|
|
|
|
*
|
|
|
|
* The intent is that i_disksize always represents the blocks which
|
|
|
|
* are used by this file. This allows recovery to restart truncate
|
|
|
|
* on orphans if we crash during truncate. We actually write i_disksize
|
|
|
|
* into the on-disk inode when writing inodes out, instead of i_size.
|
|
|
|
*
|
|
|
|
* The only time when i_disksize and i_size may be different is when
|
|
|
|
* a truncate is in progress. The only things which change i_disksize
|
|
|
|
* are ext4_get_block (growth) and ext4_truncate (shrinkth).
|
|
|
|
*/
|
|
|
|
loff_t i_disksize;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* i_data_sem is for serialising ext4_truncate() against
|
|
|
|
* ext4_getblock(). In the 2.4 ext2 design, great chunks of inode's
|
|
|
|
* data tree are chopped off during truncate. We can't do that in
|
|
|
|
* ext4 because whenever we perform intermediate commits during
|
|
|
|
* truncate, the inode and all the metadata blocks *must* be in a
|
|
|
|
* consistent state which allows truncation of the orphans to restart
|
|
|
|
* during recovery. Hence we must fix the get_block-vs-truncate race
|
|
|
|
* by other means, so we have i_data_sem.
|
|
|
|
*/
|
|
|
|
struct rw_semaphore i_data_sem;
|
|
|
|
struct inode vfs_inode;
|
2011-01-10 17:29:43 +00:00
|
|
|
struct jbd2_inode *jinode;
|
2009-05-01 17:44:33 +00:00
|
|
|
|
2014-04-21 18:37:55 +00:00
|
|
|
spinlock_t i_raw_lock; /* protects updates to the raw inode */
|
|
|
|
|
2009-05-01 17:44:33 +00:00
|
|
|
/*
|
|
|
|
* File creation time. Its function is same as that of
|
2018-07-29 19:51:00 +00:00
|
|
|
* struct timespec64 i_{a,c,m}time in the generic inode.
|
2009-05-01 17:44:33 +00:00
|
|
|
*/
|
2018-07-29 19:51:00 +00:00
|
|
|
struct timespec64 i_crtime;
|
2009-05-01 17:44:33 +00:00
|
|
|
|
|
|
|
/* mballoc */
|
2020-08-17 07:36:15 +00:00
|
|
|
atomic_t i_prealloc_active;
|
ext4: Use rbtrees to manage PAs instead of inode i_prealloc_list
Currently, the kernel uses i_prealloc_list to hold all the inode
preallocations. This is known to cause degradation in performance in
workloads which perform large number of sparse writes on a single file.
This is mainly because functions like ext4_mb_normalize_request() and
ext4_mb_use_preallocated() iterate over this complete list, resulting in
slowdowns when large number of PAs are present.
Patch 27bc446e2 partially fixed this by enforcing a limit of 512 for
the inode preallocation list and adding logic to continually trim the
list if it grows above the threshold, however our testing revealed that
a hardcoded value is not suitable for all kinds of workloads.
To optimize this, add an rbtree to the inode and hold the inode
preallocations in this rbtree. This will make iterating over inode PAs
faster and scale much better than a linked list. Additionally, we also
had to remove the LRU logic that was added during trimming of the list
(in ext4_mb_release_context()) as it will add extra overhead in rbtree.
The discards now happen in the lowest-logical-offset-first order.
** Locking notes **
With the introduction of rbtree to maintain inode PAs, we can't use RCU
to walk the tree for searching since it can result in partial traversals
which might miss some nodes(or entire subtrees) while discards happen
in parallel (which happens under a lock). Hence this patch converts the
ei->i_prealloc_lock spin_lock to rw_lock.
Almost all the codepaths that read/modify the PA rbtrees are protected
by the higher level inode->i_data_sem (except
ext4_mb_discard_group_preallocations() and ext4_clear_inode()) IIUC, the
only place we need lock protection is when one thread is reading
"searching" the PA rbtree (earlier protected under rcu_read_lock()) and
another is "deleting" the PAs in ext4_mb_discard_group_preallocations()
function (which iterates all the PAs using the grp->bb_prealloc_list and
deletes PAs from the tree without taking any inode lock (i_data_sem)).
So, this patch converts all rcu_read_lock/unlock() paths for inode list
PA to use read_lock() and all places where we were using
ei->i_prealloc_lock spinlock will now be using write_lock().
Note that this makes the fast path (searching of the right PA e.g.
ext4_mb_use_preallocated() or ext4_mb_normalize_request()), now use
read_lock() instead of rcu_read_lock/unlock(). This also will now block
due to slow discard path (ext4_mb_discard_group_preallocations()) which
uses write_lock().
But this is not as bad as it looks. This is because -
1. The slow path only occurs when the normal allocation failed and we
can say that we are low on disk space. One can argue this scenario
won't be much frequent.
2. ext4_mb_discard_group_preallocations(), locks and unlocks the rwlock
for deleting every individual PA. This gives enough opportunity for
the fast path to acquire the read_lock for searching the PA inode
list.
Suggested-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/4137bce8f6948fedd8bae134dabae24acfe699c6.1679731817.git.ojaswin@linux.ibm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2023-03-25 08:13:41 +00:00
|
|
|
struct rb_root i_prealloc_node;
|
|
|
|
rwlock_t i_prealloc_lock;
|
2009-05-01 17:44:33 +00:00
|
|
|
|
2012-11-08 20:18:54 +00:00
|
|
|
/* extents status tree */
|
|
|
|
struct ext4_es_tree i_es_tree;
|
|
|
|
rwlock_t i_es_lock;
|
2014-11-25 16:45:37 +00:00
|
|
|
struct list_head i_es_list;
|
2014-09-02 02:26:49 +00:00
|
|
|
unsigned int i_es_all_nr; /* protected by i_es_lock */
|
2014-11-25 16:45:37 +00:00
|
|
|
unsigned int i_es_shk_nr; /* protected by i_es_lock */
|
2014-11-25 16:51:23 +00:00
|
|
|
ext4_lblk_t i_es_shrink_lblk; /* Offset where we start searching for
|
|
|
|
extents to shrink. Protected by
|
|
|
|
i_es_lock */
|
2012-11-08 20:18:54 +00:00
|
|
|
|
2009-05-01 17:44:33 +00:00
|
|
|
/* ialloc */
|
|
|
|
ext4_group_t i_last_alloc_group;
|
|
|
|
|
|
|
|
/* allocation reservation info for delalloc */
|
2017-07-31 02:30:11 +00:00
|
|
|
/* In case of bigalloc, this refer to clusters rather than blocks */
|
2009-05-01 17:44:33 +00:00
|
|
|
unsigned int i_reserved_data_blocks;
|
|
|
|
|
2018-10-01 18:17:41 +00:00
|
|
|
/* pending cluster reservations for bigalloc file systems */
|
|
|
|
struct ext4_pending_tree i_pending_tree;
|
|
|
|
|
2009-05-01 17:44:33 +00:00
|
|
|
/* on-disk additional length */
|
|
|
|
__u16 i_extra_isize;
|
|
|
|
|
2012-12-10 19:04:46 +00:00
|
|
|
/* Indicate the inline data space. */
|
|
|
|
u16 i_inline_off;
|
|
|
|
u16 i_inline_size;
|
|
|
|
|
2009-12-14 12:21:14 +00:00
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
/* quota space reservation, managed internally by quota code */
|
|
|
|
qsize_t i_reserved_quota;
|
|
|
|
#endif
|
2009-09-28 19:48:29 +00:00
|
|
|
|
2013-06-04 18:21:02 +00:00
|
|
|
/* Lock protecting lists below */
|
2010-03-04 21:14:02 +00:00
|
|
|
spinlock_t i_completed_io_lock;
|
2013-06-04 18:21:02 +00:00
|
|
|
/*
|
|
|
|
* Completed IOs that need unwritten extents handling and have
|
|
|
|
* transaction reserved
|
|
|
|
*/
|
|
|
|
struct list_head i_rsv_conversion_list;
|
|
|
|
struct work_struct i_rsv_conversion_work;
|
2016-03-09 04:39:21 +00:00
|
|
|
atomic_t i_unwritten; /* Nr. of inflight conversions pending */
|
2011-01-10 17:13:42 +00:00
|
|
|
|
|
|
|
spinlock_t i_block_reservation_lock;
|
2009-12-09 04:51:10 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Transactions that contain inode's metadata needed to complete
|
|
|
|
* fsync and fdatasync, respectively.
|
|
|
|
*/
|
|
|
|
tid_t i_sync_tid;
|
|
|
|
tid_t i_datasync_tid;
|
2012-04-29 22:31:10 +00:00
|
|
|
|
2014-09-29 12:58:25 +00:00
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
struct dquot *i_dquot[MAXQUOTAS];
|
|
|
|
#endif
|
|
|
|
|
2012-04-29 22:31:10 +00:00
|
|
|
/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
|
|
|
|
__u32 i_csum_seed;
|
2015-04-12 04:43:56 +00:00
|
|
|
|
2016-01-08 21:01:21 +00:00
|
|
|
kprojid_t i_projid;
|
2009-05-01 17:44:33 +00:00
|
|
|
};
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* File system states
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_VALID_FS 0x0001 /* Unmounted cleanly */
|
|
|
|
#define EXT4_ERROR_FS 0x0002 /* Errors detected */
|
|
|
|
#define EXT4_ORPHAN_FS 0x0004 /* Orphans being recovered */
|
2020-10-15 20:37:59 +00:00
|
|
|
#define EXT4_FC_REPLAY 0x0020 /* Fast commit replay ongoing */
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2008-02-10 06:11:44 +00:00
|
|
|
/*
|
|
|
|
* Misc. filesystem flags
|
|
|
|
*/
|
|
|
|
#define EXT2_FLAGS_SIGNED_HASH 0x0001 /* Signed dirhash in use */
|
|
|
|
#define EXT2_FLAGS_UNSIGNED_HASH 0x0002 /* Unsigned dirhash in use */
|
|
|
|
#define EXT2_FLAGS_TEST_FILESYS 0x0004 /* to test development code */
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
2013-04-04 02:12:52 +00:00
|
|
|
* Mount flags set via mount options or defaults
|
2006-10-11 08:20:50 +00:00
|
|
|
*/
|
2017-06-22 15:55:14 +00:00
|
|
|
#define EXT4_MOUNT_NO_MBCACHE 0x00001 /* Do not use mbcache */
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_MOUNT_GRPID 0x00004 /* Create files with directory's group */
|
|
|
|
#define EXT4_MOUNT_DEBUG 0x00008 /* Some debugging messages */
|
|
|
|
#define EXT4_MOUNT_ERRORS_CONT 0x00010 /* Continue on errors */
|
|
|
|
#define EXT4_MOUNT_ERRORS_RO 0x00020 /* Remount fs ro on errors */
|
|
|
|
#define EXT4_MOUNT_ERRORS_PANIC 0x00040 /* Panic on errors */
|
2012-03-03 22:56:23 +00:00
|
|
|
#define EXT4_MOUNT_ERRORS_MASK 0x00070
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */
|
|
|
|
#define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/
|
2015-02-16 23:59:38 +00:00
|
|
|
#ifdef CONFIG_FS_DAX
|
2020-05-28 14:59:57 +00:00
|
|
|
#define EXT4_MOUNT_DAX_ALWAYS 0x00200 /* Direct Access */
|
2015-02-16 23:59:38 +00:00
|
|
|
#else
|
2020-05-28 14:59:57 +00:00
|
|
|
#define EXT4_MOUNT_DAX_ALWAYS 0
|
2015-02-16 23:59:38 +00:00
|
|
|
#endif
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */
|
|
|
|
#define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */
|
|
|
|
#define EXT4_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */
|
|
|
|
#define EXT4_MOUNT_WRITEBACK_DATA 0x00C00 /* No data ordering */
|
|
|
|
#define EXT4_MOUNT_UPDATE_JOURNAL 0x01000 /* Update the journal format */
|
|
|
|
#define EXT4_MOUNT_NO_UID32 0x02000 /* Disable 32-bit UIDs */
|
|
|
|
#define EXT4_MOUNT_XATTR_USER 0x04000 /* Extended user attributes */
|
|
|
|
#define EXT4_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */
|
2009-03-17 03:12:23 +00:00
|
|
|
#define EXT4_MOUNT_NO_AUTO_DA_ALLOC 0x10000 /* No auto delalloc mapping */
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_MOUNT_BARRIER 0x20000 /* Use block barriers */
|
2016-09-06 03:08:16 +00:00
|
|
|
#define EXT4_MOUNT_QUOTA 0x40000 /* Some quota option set */
|
|
|
|
#define EXT4_MOUNT_USRQUOTA 0x80000 /* "old" user quota,
|
|
|
|
* enable enforcement for hidden
|
|
|
|
* quota files */
|
|
|
|
#define EXT4_MOUNT_GRPQUOTA 0x100000 /* "old" group quota, enable
|
|
|
|
* enforcement for hidden quota
|
|
|
|
* files */
|
|
|
|
#define EXT4_MOUNT_PRJQUOTA 0x200000 /* Enable project quota
|
|
|
|
* enforcement */
|
2010-03-04 21:14:02 +00:00
|
|
|
#define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */
|
2009-11-02 18:15:27 +00:00
|
|
|
#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
|
2008-01-29 04:58:27 +00:00
|
|
|
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
|
2018-06-13 03:34:57 +00:00
|
|
|
#define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */
|
2021-04-01 17:21:29 +00:00
|
|
|
#define EXT4_MOUNT_NO_PREFETCH_BLOCK_BITMAPS 0x4000000
|
2008-07-11 23:27:31 +00:00
|
|
|
#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
|
2008-10-11 02:12:43 +00:00
|
|
|
#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
|
2009-05-17 19:38:01 +00:00
|
|
|
#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
|
2009-11-19 19:25:42 +00:00
|
|
|
#define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */
|
ext4: add support for lazy inode table initialization
When the lazy_itable_init extended option is passed to mke2fs, it
considerably speeds up filesystem creation because inode tables are
not zeroed out. The fact that parts of the inode table are
uninitialized is not a problem so long as the block group descriptors,
which contain information regarding how much of the inode table has
been initialized, has not been corrupted. However, if the block group
checksums are not valid, e2fsck must scan the entire inode table, and
the old, uninitialized data could potentially cause e2fsck to
report false problems.
Hence, it is important for the inode tables to be initialized as soon
as possible. This commit adds this feature so that mke2fs can safely
use the lazy inode table initialization feature to speed up formatting
file systems.
This is done via a new new kernel thread called ext4lazyinit, which is
created on demand and destroyed, when it is no longer needed. There
is only one thread for all ext4 filesystems in the system. When the
first filesystem with the init_itable mount option is mounted, the ext4lazyinit
thread is created, then the filesystem can register its request in the
request list.
This thread then walks through the list of requests picking up
scheduled requests and invoking ext4_init_inode_table(). Next schedule
time for the request is computed by multiplying the time it took to
zero out last inode table with wait multiplier, which can be set with
the (init_itable=n) mount option (default is 10). We are doing
this so we do not take the whole I/O bandwidth. When the thread is no
longer necessary (request list is empty) it frees the appropriate
structures and exits (and can be created later by another
filesystem).
We do not disturb regular inode allocations in any way; they just do not
care whether the inode table is, or is not zeroed. But when zeroing, we
have to skip used inodes, obviously. Also we should prevent new inode
allocations from the group, while zeroing is on the way. For that we
take write alloc_sem lock in ext4_init_inode_table() and read alloc_sem
in the ext4_claim_inode, so when we are unlucky and allocator hits the
group which is currently being zeroed, it just has to wait.
This can be suppressed using the mount option no_init_itable.
Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2010-10-28 01:30:05 +00:00
|
|
|
#define EXT4_MOUNT_INIT_INODE_TABLE 0x80000000 /* Initialize uninitialized itables */
|
2008-10-11 02:12:43 +00:00
|
|
|
|
2013-04-04 02:12:52 +00:00
|
|
|
/*
|
|
|
|
* Mount flags set either automatically (could not be set by mount option)
|
|
|
|
* based on per file system feature or property or in special cases such as
|
|
|
|
* distinguishing between explicit mount option definition and default.
|
|
|
|
*/
|
2011-09-03 22:22:38 +00:00
|
|
|
#define EXT4_MOUNT2_EXPLICIT_DELALLOC 0x00000001 /* User explicitly
|
|
|
|
specified delalloc */
|
2013-04-04 02:12:52 +00:00
|
|
|
#define EXT4_MOUNT2_STD_GROUP_SIZE 0x00000002 /* We have standard group
|
|
|
|
size of blocksize * 8
|
|
|
|
blocks */
|
2014-03-24 18:09:06 +00:00
|
|
|
#define EXT4_MOUNT2_HURD_COMPAT 0x00000004 /* Support HURD-castrated
|
|
|
|
file systems */
|
ext4: do not allow journal_opts for fs w/o journal
It is appeared that we can pass journal related mount options and such options
be shown in /proc/mounts
Example:
#mkfs.ext4 -F /dev/vdb
#tune2fs -O ^has_journal /dev/vdb
#mount /dev/vdb /mnt/ -ocommit=20,journal_async_commit
#cat /proc/mounts | grep /mnt
/dev/vdb /mnt ext4 rw,relatime,journal_checksum,journal_async_commit,commit=20,data=ordered 0 0
But options:"journal_checksum,journal_async_commit,commit=20,data=ordered" has
nothing with reality because there is no journal at all.
This patch disallow following options for journalless configurations:
- journal_checksum
- journal_async_commit
- commit=%ld
- data={writeback,ordered,journal}
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
2015-10-19 03:50:26 +00:00
|
|
|
#define EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM 0x00000008 /* User explicitly
|
|
|
|
specified journal checksum */
|
|
|
|
|
2020-10-15 20:37:54 +00:00
|
|
|
#define EXT4_MOUNT2_JOURNAL_FAST_COMMIT 0x00000010 /* Journal fast commit */
|
2020-11-11 18:32:09 +00:00
|
|
|
#define EXT4_MOUNT2_DAX_NEVER 0x00000020 /* Do not allow Direct Access */
|
|
|
|
#define EXT4_MOUNT2_DAX_INODE 0x00000040 /* For printing options only */
|
ext4: improve cr 0 / cr 1 group scanning
Instead of traversing through groups linearly, scan groups in specific
orders at cr 0 and cr 1. At cr 0, we want to find groups that have the
largest free order >= the order of the request. So, with this patch,
we maintain lists for each possible order and insert each group into a
list based on the largest free order in its buddy bitmap. During cr 0
allocation, we traverse these lists in the increasing order of largest
free orders. This allows us to find a group with the best available cr
0 match in constant time. If nothing can be found, we fallback to cr 1
immediately.
At CR1, the story is slightly different. We want to traverse in the
order of increasing average fragment size. For CR1, we maintain a rb
tree of groupinfos which is sorted by average fragment size. Instead
of traversing linearly, at CR1, we traverse in the order of increasing
average fragment size, starting at the most optimal group. This brings
down cr 1 search complexity to log(num groups).
For cr >= 2, we just perform the linear search as before. Also, in
case of lock contention, we intermittently fallback to linear search
even in CR 0 and CR 1 cases. This allows us to proceed during the
allocation path even in case of high contention.
There is an opportunity to do optimization at CR2 too. That's because
at CR2 we only consider groups where bb_free counter (number of free
blocks) is greater than the request extent size. That's left as future
work.
All the changes introduced in this patch are protected under a new
mount option "mb_optimize_scan".
With this patchset, following experiment was performed:
Created a highly fragmented disk of size 65TB. The disk had no
contiguous 2M regions. Following command was run consecutively for 3
times:
time dd if=/dev/urandom of=file bs=2M count=10
Here are the results with and without cr 0/1 optimizations introduced
in this patch:
|---------+------------------------------+---------------------------|
| | Without CR 0/1 Optimizations | With CR 0/1 Optimizations |
|---------+------------------------------+---------------------------|
| 1st run | 5m1.871s | 2m47.642s |
| 2nd run | 2m28.390s | 0m0.611s |
| 3rd run | 2m26.530s | 0m1.255s |
|---------+------------------------------+---------------------------|
Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
Link: https://lore.kernel.org/r/20210401172129.189766-6-harshadshirwadkar@gmail.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2021-04-01 17:21:27 +00:00
|
|
|
#define EXT4_MOUNT2_MB_OPTIMIZE_SCAN 0x00000080 /* Optimize group
|
|
|
|
* scanning in mballoc
|
|
|
|
*/
|
2023-06-16 16:50:50 +00:00
|
|
|
#define EXT4_MOUNT2_ABORT 0x00000100 /* Abort filesystem */
|
2020-10-15 20:37:54 +00:00
|
|
|
|
2010-12-16 01:26:48 +00:00
|
|
|
#define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \
|
|
|
|
~EXT4_MOUNT_##opt
|
|
|
|
#define set_opt(sb, opt) EXT4_SB(sb)->s_mount_opt |= \
|
|
|
|
EXT4_MOUNT_##opt
|
2006-10-11 08:20:53 +00:00
|
|
|
#define test_opt(sb, opt) (EXT4_SB(sb)->s_mount_opt & \
|
|
|
|
EXT4_MOUNT_##opt)
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2010-12-16 01:30:48 +00:00
|
|
|
#define clear_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 &= \
|
|
|
|
~EXT4_MOUNT2_##opt
|
|
|
|
#define set_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 |= \
|
|
|
|
EXT4_MOUNT2_##opt
|
|
|
|
#define test_opt2(sb, opt) (EXT4_SB(sb)->s_mount_opt2 & \
|
|
|
|
EXT4_MOUNT2_##opt)
|
|
|
|
|
ext4: use proper little-endian bitops
ext4_{set,clear}_bit() is defined as __test_and_{set,clear}_bit_le() for
ext4. Only two ext4_{set,clear}_bit() calls check the return value. The
rest of calls ignore the return value and they can be replaced with
__{set,clear}_bit_le().
This changes ext4_{set,clear}_bit() from __test_and_{set,clear}_bit_le()
to __{set,clear}_bit_le() and introduces ext4_test_and_{set,clear}_bit()
for the two places where old bit needs to be returned.
This ext4_{set,clear}_bit() change is considered safe, because if someone
uses these macros without noticing the change, new ext4_{set,clear}_bit
don't have return value and causes compiler errors where the return value
is used.
This also removes unused ext4_find_first_zero_bit().
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2011-12-29 01:32:07 +00:00
|
|
|
#define ext4_test_and_set_bit __test_and_set_bit_le
|
|
|
|
#define ext4_set_bit __set_bit_le
|
|
|
|
#define ext4_test_and_clear_bit __test_and_clear_bit_le
|
|
|
|
#define ext4_clear_bit __clear_bit_le
|
2011-03-23 23:42:07 +00:00
|
|
|
#define ext4_test_bit test_bit_le
|
|
|
|
#define ext4_find_next_zero_bit find_next_zero_bit_le
|
|
|
|
#define ext4_find_next_bit find_next_bit_le
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2022-02-16 07:02:47 +00:00
|
|
|
extern void mb_set_bits(void *bm, int cur, int len);
|
2011-07-27 02:05:53 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Maximal mount counts between two filesystem checks
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_DFL_MAX_MNT_COUNT 20 /* Allow 20 mounts */
|
|
|
|
#define EXT4_DFL_CHECKINTERVAL 0 /* Don't use interval check */
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Behaviour when detecting errors
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_ERRORS_CONTINUE 1 /* Continue execution */
|
|
|
|
#define EXT4_ERRORS_RO 2 /* Remount fs read-only */
|
|
|
|
#define EXT4_ERRORS_PANIC 3 /* Panic */
|
|
|
|
#define EXT4_ERRORS_DEFAULT EXT4_ERRORS_CONTINUE
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2012-04-29 22:23:10 +00:00
|
|
|
/* Metadata checksum algorithm codes */
|
|
|
|
#define EXT4_CRC32C_CHKSUM 1
|
|
|
|
|
2021-12-13 13:56:18 +00:00
|
|
|
#define EXT4_LABEL_MAX 16
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Structure of the super block
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
struct ext4_super_block {
|
2006-10-11 08:20:50 +00:00
|
|
|
/*00*/ __le32 s_inodes_count; /* Inodes count */
|
2007-10-16 22:38:25 +00:00
|
|
|
__le32 s_blocks_count_lo; /* Blocks count */
|
2007-10-16 22:38:25 +00:00
|
|
|
__le32 s_r_blocks_count_lo; /* Reserved blocks count */
|
|
|
|
__le32 s_free_blocks_count_lo; /* Free blocks count */
|
2006-10-11 08:20:50 +00:00
|
|
|
/*10*/ __le32 s_free_inodes_count; /* Free inodes count */
|
|
|
|
__le32 s_first_data_block; /* First Data Block */
|
|
|
|
__le32 s_log_block_size; /* Block size */
|
2011-09-09 22:34:51 +00:00
|
|
|
__le32 s_log_cluster_size; /* Allocation cluster size */
|
2006-10-11 08:20:50 +00:00
|
|
|
/*20*/ __le32 s_blocks_per_group; /* # Blocks per group */
|
2011-09-09 22:34:51 +00:00
|
|
|
__le32 s_clusters_per_group; /* # Clusters per group */
|
2006-10-11 08:20:50 +00:00
|
|
|
__le32 s_inodes_per_group; /* # Inodes per group */
|
|
|
|
__le32 s_mtime; /* Mount time */
|
|
|
|
/*30*/ __le32 s_wtime; /* Write time */
|
|
|
|
__le16 s_mnt_count; /* Mount count */
|
|
|
|
__le16 s_max_mnt_count; /* Maximal mount count */
|
|
|
|
__le16 s_magic; /* Magic signature */
|
|
|
|
__le16 s_state; /* File system state */
|
|
|
|
__le16 s_errors; /* Behaviour when detecting errors */
|
|
|
|
__le16 s_minor_rev_level; /* minor revision level */
|
|
|
|
/*40*/ __le32 s_lastcheck; /* time of last check */
|
|
|
|
__le32 s_checkinterval; /* max. time between checks */
|
|
|
|
__le32 s_creator_os; /* OS */
|
|
|
|
__le32 s_rev_level; /* Revision level */
|
|
|
|
/*50*/ __le16 s_def_resuid; /* Default uid for reserved blocks */
|
|
|
|
__le16 s_def_resgid; /* Default gid for reserved blocks */
|
|
|
|
/*
|
2006-10-11 08:20:53 +00:00
|
|
|
* These fields are for EXT4_DYNAMIC_REV superblocks only.
|
2006-10-11 08:20:50 +00:00
|
|
|
*
|
|
|
|
* Note: the difference between the compatible feature set and
|
|
|
|
* the incompatible feature set is that if there is a bit set
|
|
|
|
* in the incompatible feature set that the kernel doesn't
|
|
|
|
* know about, it should refuse to mount the filesystem.
|
|
|
|
*
|
|
|
|
* e2fsck's requirements are more strict; if it doesn't know
|
|
|
|
* about a feature in either the compatible or incompatible
|
|
|
|
* feature set, it must abort and not try to meddle with
|
|
|
|
* things it doesn't understand...
|
|
|
|
*/
|
|
|
|
__le32 s_first_ino; /* First non-reserved inode */
|
2006-10-11 08:21:14 +00:00
|
|
|
__le16 s_inode_size; /* size of inode structure */
|
2006-10-11 08:20:50 +00:00
|
|
|
__le16 s_block_group_nr; /* block group # of this superblock */
|
|
|
|
__le32 s_feature_compat; /* compatible feature set */
|
|
|
|
/*60*/ __le32 s_feature_incompat; /* incompatible feature set */
|
|
|
|
__le32 s_feature_ro_compat; /* readonly-compatible feature set */
|
|
|
|
/*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
|
2021-12-13 13:56:18 +00:00
|
|
|
/*78*/ char s_volume_name[EXT4_LABEL_MAX]; /* volume name */
|
2018-08-27 05:15:11 +00:00
|
|
|
/*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */
|
2006-10-11 08:20:50 +00:00
|
|
|
/*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */
|
|
|
|
/*
|
|
|
|
* Performance hints. Directory preallocation should only
|
2006-10-11 08:20:53 +00:00
|
|
|
* happen if the EXT4_FEATURE_COMPAT_DIR_PREALLOC flag is on.
|
2006-10-11 08:20:50 +00:00
|
|
|
*/
|
|
|
|
__u8 s_prealloc_blocks; /* Nr of blocks to try to preallocate*/
|
|
|
|
__u8 s_prealloc_dir_blocks; /* Nr to preallocate for dirs */
|
|
|
|
__le16 s_reserved_gdt_blocks; /* Per group desc for online growth */
|
|
|
|
/*
|
2006-10-11 08:20:53 +00:00
|
|
|
* Journaling support valid if EXT4_FEATURE_COMPAT_HAS_JOURNAL set.
|
2006-10-11 08:20:50 +00:00
|
|
|
*/
|
|
|
|
/*D0*/ __u8 s_journal_uuid[16]; /* uuid of journal superblock */
|
|
|
|
/*E0*/ __le32 s_journal_inum; /* inode number of journal file */
|
|
|
|
__le32 s_journal_dev; /* device number of journal file */
|
|
|
|
__le32 s_last_orphan; /* start of list of inodes to delete */
|
|
|
|
__le32 s_hash_seed[4]; /* HTREE hash seed */
|
|
|
|
__u8 s_def_hash_version; /* Default hash version to use */
|
2010-07-27 15:56:04 +00:00
|
|
|
__u8 s_jnl_backup_type;
|
2006-10-11 08:21:14 +00:00
|
|
|
__le16 s_desc_size; /* size of group descriptor */
|
2006-10-11 08:21:10 +00:00
|
|
|
/*100*/ __le32 s_default_mount_opts;
|
2006-10-11 08:20:50 +00:00
|
|
|
__le32 s_first_meta_bg; /* First metablock block group */
|
2006-10-11 08:21:10 +00:00
|
|
|
__le32 s_mkfs_time; /* When the filesystem was created */
|
|
|
|
__le32 s_jnl_blocks[17]; /* Backup of the journal inode */
|
2023-03-01 13:38:42 +00:00
|
|
|
/* 64bit support valid if EXT4_FEATURE_INCOMPAT_64BIT */
|
2006-10-11 08:21:10 +00:00
|
|
|
/*150*/ __le32 s_blocks_count_hi; /* Blocks count */
|
|
|
|
__le32 s_r_blocks_count_hi; /* Reserved blocks count */
|
|
|
|
__le32 s_free_blocks_count_hi; /* Free blocks count */
|
2007-10-16 22:38:25 +00:00
|
|
|
__le16 s_min_extra_isize; /* All inodes have at least # bytes */
|
|
|
|
__le16 s_want_extra_isize; /* New inodes should reserve # bytes */
|
|
|
|
__le32 s_flags; /* Miscellaneous flags */
|
|
|
|
__le16 s_raid_stride; /* RAID stride */
|
2011-05-24 22:31:25 +00:00
|
|
|
__le16 s_mmp_update_interval; /* # seconds to wait in MMP checking */
|
2007-10-16 22:38:25 +00:00
|
|
|
__le64 s_mmp_block; /* Block for multi-mount protection */
|
|
|
|
__le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
|
2008-07-11 23:27:31 +00:00
|
|
|
__u8 s_log_groups_per_flex; /* FLEX_BG group size */
|
2012-04-29 22:23:10 +00:00
|
|
|
__u8 s_checksum_type; /* metadata checksum algorithm used */
|
2015-04-11 11:44:12 +00:00
|
|
|
__u8 s_encryption_level; /* versioning level for encryption */
|
|
|
|
__u8 s_reserved_pad; /* Padding to next 32bits */
|
2009-03-01 00:39:58 +00:00
|
|
|
__le64 s_kbytes_written; /* nr of lifetime kilobytes written */
|
2010-06-29 15:00:23 +00:00
|
|
|
__le32 s_snapshot_inum; /* Inode number of active snapshot */
|
|
|
|
__le32 s_snapshot_id; /* sequential ID of active snapshot */
|
|
|
|
__le64 s_snapshot_r_blocks_count; /* reserved blocks for active
|
|
|
|
snapshot's future use */
|
|
|
|
__le32 s_snapshot_list; /* inode number of the head of the
|
|
|
|
on-disk snapshot list */
|
2010-07-27 15:56:03 +00:00
|
|
|
#define EXT4_S_ERR_START offsetof(struct ext4_super_block, s_error_count)
|
|
|
|
__le32 s_error_count; /* number of fs errors */
|
|
|
|
__le32 s_first_error_time; /* first time an error happened */
|
|
|
|
__le32 s_first_error_ino; /* inode involved in first error */
|
|
|
|
__le64 s_first_error_block; /* block involved of first error */
|
2018-08-27 05:15:11 +00:00
|
|
|
__u8 s_first_error_func[32] __nonstring; /* function where the error happened */
|
2010-07-27 15:56:03 +00:00
|
|
|
__le32 s_first_error_line; /* line number where error happened */
|
|
|
|
__le32 s_last_error_time; /* most recent time of an error */
|
|
|
|
__le32 s_last_error_ino; /* inode involved in last error */
|
|
|
|
__le32 s_last_error_line; /* line number where error happened */
|
|
|
|
__le64 s_last_error_block; /* block involved of last error */
|
2018-08-27 05:15:11 +00:00
|
|
|
__u8 s_last_error_func[32] __nonstring; /* function where the error happened */
|
2010-08-02 03:14:20 +00:00
|
|
|
#define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
|
|
|
|
__u8 s_mount_opts[64];
|
2011-09-09 22:34:51 +00:00
|
|
|
__le32 s_usr_quota_inum; /* inode for tracking user quota */
|
|
|
|
__le32 s_grp_quota_inum; /* inode for tracking group quota */
|
|
|
|
__le32 s_overhead_clusters; /* overhead blocks/clusters in fs */
|
2014-05-12 14:16:06 +00:00
|
|
|
__le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */
|
2015-01-19 21:00:58 +00:00
|
|
|
__u8 s_encrypt_algos[4]; /* Encryption algorithms in use */
|
2015-04-11 11:44:12 +00:00
|
|
|
__u8 s_encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
|
|
|
|
__le32 s_lpf_ino; /* Location of the lost+found inode */
|
2015-10-17 20:15:18 +00:00
|
|
|
__le32 s_prj_quota_inum; /* inode for tracking project quota */
|
2015-10-17 20:16:02 +00:00
|
|
|
__le32 s_checksum_seed; /* crc32c(uuid) if csum_seed set */
|
2018-07-29 19:51:48 +00:00
|
|
|
__u8 s_wtime_hi;
|
|
|
|
__u8 s_mtime_hi;
|
|
|
|
__u8 s_mkfs_time_hi;
|
|
|
|
__u8 s_lastcheck_hi;
|
|
|
|
__u8 s_first_error_time_hi;
|
|
|
|
__u8 s_last_error_time_hi;
|
2019-11-20 02:54:15 +00:00
|
|
|
__u8 s_first_error_errcode;
|
|
|
|
__u8 s_last_error_errcode;
|
2019-04-25 18:05:42 +00:00
|
|
|
__le16 s_encoding; /* Filename charset encoding */
|
|
|
|
__le16 s_encoding_flags; /* Filename charset encoding flags */
|
2021-08-16 09:57:06 +00:00
|
|
|
__le32 s_orphan_file_inum; /* Inode for tracking orphan inodes */
|
|
|
|
__le32 s_reserved[94]; /* Padding to the end of the block */
|
2012-04-29 22:23:10 +00:00
|
|
|
__le32 s_checksum; /* crc32c(superblock) */
|
2006-10-11 08:20:50 +00:00
|
|
|
};
|
|
|
|
|
2010-07-27 15:56:03 +00:00
|
|
|
#define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START)
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
#ifdef __KERNEL__
|
2009-06-13 14:09:48 +00:00
|
|
|
|
2014-09-11 15:15:15 +00:00
|
|
|
/* Number of quota types we support */
|
2016-01-08 21:01:22 +00:00
|
|
|
#define EXT4_MAXQUOTAS 3
|
2014-09-11 15:15:15 +00:00
|
|
|
|
2019-04-25 18:05:42 +00:00
|
|
|
#define EXT4_ENC_UTF8_12_1 1
|
|
|
|
|
2021-08-16 09:57:04 +00:00
|
|
|
/* Types of ext4 journal triggers */
|
|
|
|
enum ext4_journal_trigger_type {
|
2021-08-16 09:57:06 +00:00
|
|
|
EXT4_JTR_ORPHAN_FILE,
|
2021-08-16 09:57:04 +00:00
|
|
|
EXT4_JTR_NONE /* This must be the last entry for indexing to work! */
|
|
|
|
};
|
|
|
|
|
|
|
|
#define EXT4_JOURNAL_TRIGGER_COUNT EXT4_JTR_NONE
|
|
|
|
|
|
|
|
struct ext4_journal_trigger {
|
|
|
|
struct jbd2_buffer_trigger_type tr_triggers;
|
|
|
|
struct super_block *sb;
|
|
|
|
};
|
|
|
|
|
|
|
|
static inline struct ext4_journal_trigger *EXT4_TRIGGER(
|
|
|
|
struct jbd2_buffer_trigger_type *trigger)
|
|
|
|
{
|
|
|
|
return container_of(trigger, struct ext4_journal_trigger, tr_triggers);
|
|
|
|
}
|
|
|
|
|
2021-08-16 09:57:06 +00:00
|
|
|
#define EXT4_ORPHAN_BLOCK_MAGIC 0x0b10ca04
|
|
|
|
|
|
|
|
/* Structure at the tail of orphan block */
|
|
|
|
struct ext4_orphan_block_tail {
|
|
|
|
__le32 ob_magic;
|
|
|
|
__le32 ob_checksum;
|
|
|
|
};
|
|
|
|
|
|
|
|
static inline int ext4_inodes_per_orphan_block(struct super_block *sb)
|
|
|
|
{
|
|
|
|
return (sb->s_blocksize - sizeof(struct ext4_orphan_block_tail)) /
|
|
|
|
sizeof(u32);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct ext4_orphan_block {
|
2021-08-16 09:57:08 +00:00
|
|
|
atomic_t ob_free_entries; /* Number of free orphan entries in block */
|
2021-08-16 09:57:06 +00:00
|
|
|
struct buffer_head *ob_bh; /* Buffer for orphan block */
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Info about orphan file.
|
|
|
|
*/
|
|
|
|
struct ext4_orphan_info {
|
|
|
|
int of_blocks; /* Number of orphan blocks in a file */
|
|
|
|
__u32 of_csum_seed; /* Checksum seed for orphan file */
|
|
|
|
struct ext4_orphan_block *of_binfo; /* Array with info about orphan
|
|
|
|
* file blocks */
|
|
|
|
};
|
|
|
|
|
2009-05-03 20:33:44 +00:00
|
|
|
/*
|
|
|
|
* fourth extended-fs super-block data in memory
|
|
|
|
*/
|
|
|
|
struct ext4_sb_info {
|
|
|
|
unsigned long s_desc_size; /* Size of a group descriptor in bytes */
|
|
|
|
unsigned long s_inodes_per_block;/* Number of inodes per block */
|
|
|
|
unsigned long s_blocks_per_group;/* Number of blocks in a group */
|
2011-09-09 22:34:51 +00:00
|
|
|
unsigned long s_clusters_per_group; /* Number of clusters in a group */
|
2009-05-03 20:33:44 +00:00
|
|
|
unsigned long s_inodes_per_group;/* Number of inodes in a group */
|
|
|
|
unsigned long s_itb_per_group; /* Number of inode table blocks per group */
|
|
|
|
unsigned long s_gdb_count; /* Number of group descriptor blocks */
|
|
|
|
unsigned long s_desc_per_block; /* Number of group descriptors per block */
|
|
|
|
ext4_group_t s_groups_count; /* Number of groups in the fs */
|
ext4: limit block allocations for indirect-block files to < 2^32
Today, the ext4 allocator will happily allocate blocks past
2^32 for indirect-block files, which results in the block
numbers getting truncated, and corruption ensues.
This patch limits such allocations to < 2^32, and adds
BUG_ONs if we do get blocks larger than that.
This should address RH Bug 519471, ext4 bitmap allocator
must limit blocks to < 2^32
* ext4_find_goal() is modified to choose a goal < UINT_MAX,
so that our starting point is in an acceptable range.
* ext4_xattr_block_set() is modified such that the goal block
is < UINT_MAX, as above.
* ext4_mb_regular_allocator() is modified so that the group
search does not continue into groups which are too high
* ext4_mb_use_preallocated() has a check that we don't use
preallocated space which is too far out
* ext4_alloc_blocks() and ext4_xattr_block_set() add some BUG_ONs
No attempt has been made to limit inode locations to < 2^32,
so we may wind up with blocks far from their inodes. Doing
this much already will lead to some odd ENOSPC issues when the
"lower 32" gets full, and further restricting inodes could
make that even weirder.
For high inodes, choosing a goal of the original, % UINT_MAX,
may be a bit odd, but then we're in an odd situation anyway,
and I don't know of a better heuristic.
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2009-09-16 18:45:10 +00:00
|
|
|
ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */
|
2012-07-09 20:27:05 +00:00
|
|
|
unsigned long s_overhead; /* # of fs overhead clusters */
|
2011-09-09 22:34:51 +00:00
|
|
|
unsigned int s_cluster_ratio; /* Number of blocks per cluster */
|
|
|
|
unsigned int s_cluster_bits; /* log2 of s_cluster_ratio */
|
2009-05-03 20:33:44 +00:00
|
|
|
loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
|
|
|
|
struct buffer_head * s_sbh; /* Buffer containing the super block */
|
|
|
|
struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
|
2023-08-26 17:47:11 +00:00
|
|
|
/* Array of bh's for the block group descriptors */
|
2020-02-15 21:40:37 +00:00
|
|
|
struct buffer_head * __rcu *s_group_desc;
|
2009-06-13 14:09:41 +00:00
|
|
|
unsigned int s_mount_opt;
|
2010-12-16 01:30:48 +00:00
|
|
|
unsigned int s_mount_opt2;
|
2020-11-06 03:59:09 +00:00
|
|
|
unsigned long s_mount_flags;
|
2012-03-05 00:27:31 +00:00
|
|
|
unsigned int s_def_mount_opt;
|
2023-01-29 03:49:39 +00:00
|
|
|
unsigned int s_def_mount_opt2;
|
2009-05-03 20:33:44 +00:00
|
|
|
ext4_fsblk_t s_sb_block;
|
2013-04-10 02:11:22 +00:00
|
|
|
atomic64_t s_resv_clusters;
|
2012-02-07 23:41:49 +00:00
|
|
|
kuid_t s_resuid;
|
|
|
|
kgid_t s_resgid;
|
2009-05-03 20:33:44 +00:00
|
|
|
unsigned short s_mount_state;
|
|
|
|
unsigned short s_pad;
|
|
|
|
int s_addr_per_block_bits;
|
|
|
|
int s_desc_per_block_bits;
|
|
|
|
int s_inode_size;
|
|
|
|
int s_first_ino;
|
|
|
|
unsigned int s_inode_readahead_blks;
|
2009-06-13 15:45:35 +00:00
|
|
|
unsigned int s_inode_goal;
|
2009-05-03 20:33:44 +00:00
|
|
|
u32 s_hash_seed[4];
|
|
|
|
int s_def_hash_version;
|
2021-05-27 23:55:57 +00:00
|
|
|
int s_hash_unsigned; /* 3 if hash should be unsigned, 0 if not */
|
2011-09-09 22:56:51 +00:00
|
|
|
struct percpu_counter s_freeclusters_counter;
|
2009-05-03 20:33:44 +00:00
|
|
|
struct percpu_counter s_freeinodes_counter;
|
|
|
|
struct percpu_counter s_dirs_counter;
|
2011-09-09 22:56:51 +00:00
|
|
|
struct percpu_counter s_dirtyclusters_counter;
|
2021-02-18 15:11:32 +00:00
|
|
|
struct percpu_counter s_sra_exceeded_retry_limit;
|
2009-05-03 20:33:44 +00:00
|
|
|
struct blockgroup_lock *s_blockgroup_lock;
|
|
|
|
struct proc_dir_entry *s_proc;
|
|
|
|
struct kobject s_kobj;
|
|
|
|
struct completion s_kobj_unregister;
|
2012-05-31 02:56:46 +00:00
|
|
|
struct super_block *s_sb;
|
2021-04-30 18:50:46 +00:00
|
|
|
struct buffer_head *s_mmp_bh;
|
2009-05-03 20:33:44 +00:00
|
|
|
|
|
|
|
/* Journaling */
|
|
|
|
struct journal_s *s_journal;
|
2017-02-05 06:27:48 +00:00
|
|
|
unsigned long s_ext4_flags; /* Ext4 superblock flags */
|
2021-08-16 09:57:06 +00:00
|
|
|
struct mutex s_orphan_lock; /* Protects on disk list changes */
|
|
|
|
struct list_head s_orphan; /* List of orphaned inodes in on disk
|
|
|
|
list */
|
|
|
|
struct ext4_orphan_info s_orphan_info;
|
2009-05-03 20:33:44 +00:00
|
|
|
unsigned long s_commit_interval;
|
|
|
|
u32 s_max_batch_time;
|
|
|
|
u32 s_min_batch_time;
|
2020-09-24 03:03:42 +00:00
|
|
|
struct block_device *s_journal_bdev;
|
2009-05-03 20:33:44 +00:00
|
|
|
#ifdef CONFIG_QUOTA
|
2018-10-12 13:28:09 +00:00
|
|
|
/* Names of quota files with journalled quota */
|
|
|
|
char __rcu *s_qf_names[EXT4_MAXQUOTAS];
|
2009-05-03 20:33:44 +00:00
|
|
|
int s_jquota_fmt; /* Format of quota to use */
|
|
|
|
#endif
|
|
|
|
unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
|
2020-09-24 03:03:43 +00:00
|
|
|
struct ext4_system_blocks __rcu *s_system_blks;
|
2009-05-03 20:33:44 +00:00
|
|
|
|
|
|
|
#ifdef EXTENTS_STATS
|
|
|
|
/* ext4 extents stats */
|
|
|
|
unsigned long s_ext_min;
|
|
|
|
unsigned long s_ext_max;
|
|
|
|
unsigned long s_depth_max;
|
|
|
|
spinlock_t s_ext_stats_lock;
|
|
|
|
unsigned long s_ext_blocks;
|
|
|
|
unsigned long s_ext_extents;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* for buddy allocator */
|
2020-02-19 03:08:50 +00:00
|
|
|
struct ext4_group_info ** __rcu *s_group_info;
|
2009-05-03 20:33:44 +00:00
|
|
|
struct inode *s_buddy_cache;
|
|
|
|
spinlock_t s_md_lock;
|
|
|
|
unsigned short *s_mb_offsets;
|
|
|
|
unsigned int *s_mb_maxs;
|
2012-09-05 05:31:50 +00:00
|
|
|
unsigned int s_group_info_size;
|
2016-06-26 22:24:01 +00:00
|
|
|
unsigned int s_mb_free_pending;
|
2023-06-12 12:40:17 +00:00
|
|
|
struct list_head s_freed_data_list[2]; /* List of blocks to be freed
|
2017-06-23 03:54:33 +00:00
|
|
|
after commit completed */
|
2021-07-24 07:41:23 +00:00
|
|
|
struct list_head s_discard_list;
|
|
|
|
struct work_struct s_discard_work;
|
2021-08-30 07:52:46 +00:00
|
|
|
atomic_t s_retry_alloc_pending;
|
2022-09-08 09:21:28 +00:00
|
|
|
struct list_head *s_mb_avg_fragment_size;
|
|
|
|
rwlock_t *s_mb_avg_fragment_size_locks;
|
ext4: improve cr 0 / cr 1 group scanning
Instead of traversing through groups linearly, scan groups in specific
orders at cr 0 and cr 1. At cr 0, we want to find groups that have the
largest free order >= the order of the request. So, with this patch,
we maintain lists for each possible order and insert each group into a
list based on the largest free order in its buddy bitmap. During cr 0
allocation, we traverse these lists in the increasing order of largest
free orders. This allows us to find a group with the best available cr
0 match in constant time. If nothing can be found, we fallback to cr 1
immediately.
At CR1, the story is slightly different. We want to traverse in the
order of increasing average fragment size. For CR1, we maintain a rb
tree of groupinfos which is sorted by average fragment size. Instead
of traversing linearly, at CR1, we traverse in the order of increasing
average fragment size, starting at the most optimal group. This brings
down cr 1 search complexity to log(num groups).
For cr >= 2, we just perform the linear search as before. Also, in
case of lock contention, we intermittently fallback to linear search
even in CR 0 and CR 1 cases. This allows us to proceed during the
allocation path even in case of high contention.
There is an opportunity to do optimization at CR2 too. That's because
at CR2 we only consider groups where bb_free counter (number of free
blocks) is greater than the request extent size. That's left as future
work.
All the changes introduced in this patch are protected under a new
mount option "mb_optimize_scan".
With this patchset, following experiment was performed:
Created a highly fragmented disk of size 65TB. The disk had no
contiguous 2M regions. Following command was run consecutively for 3
times:
time dd if=/dev/urandom of=file bs=2M count=10
Here are the results with and without cr 0/1 optimizations introduced
in this patch:
|---------+------------------------------+---------------------------|
| | Without CR 0/1 Optimizations | With CR 0/1 Optimizations |
|---------+------------------------------+---------------------------|
| 1st run | 5m1.871s | 2m47.642s |
| 2nd run | 2m28.390s | 0m0.611s |
| 3rd run | 2m26.530s | 0m1.255s |
|---------+------------------------------+---------------------------|
Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
Link: https://lore.kernel.org/r/20210401172129.189766-6-harshadshirwadkar@gmail.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2021-04-01 17:21:27 +00:00
|
|
|
struct list_head *s_mb_largest_free_orders;
|
|
|
|
rwlock_t *s_mb_largest_free_orders_locks;
|
2009-05-03 20:33:44 +00:00
|
|
|
|
|
|
|
/* tunables */
|
|
|
|
unsigned long s_stripe;
|
ext4: improve cr 0 / cr 1 group scanning
Instead of traversing through groups linearly, scan groups in specific
orders at cr 0 and cr 1. At cr 0, we want to find groups that have the
largest free order >= the order of the request. So, with this patch,
we maintain lists for each possible order and insert each group into a
list based on the largest free order in its buddy bitmap. During cr 0
allocation, we traverse these lists in the increasing order of largest
free orders. This allows us to find a group with the best available cr
0 match in constant time. If nothing can be found, we fallback to cr 1
immediately.
At CR1, the story is slightly different. We want to traverse in the
order of increasing average fragment size. For CR1, we maintain a rb
tree of groupinfos which is sorted by average fragment size. Instead
of traversing linearly, at CR1, we traverse in the order of increasing
average fragment size, starting at the most optimal group. This brings
down cr 1 search complexity to log(num groups).
For cr >= 2, we just perform the linear search as before. Also, in
case of lock contention, we intermittently fallback to linear search
even in CR 0 and CR 1 cases. This allows us to proceed during the
allocation path even in case of high contention.
There is an opportunity to do optimization at CR2 too. That's because
at CR2 we only consider groups where bb_free counter (number of free
blocks) is greater than the request extent size. That's left as future
work.
All the changes introduced in this patch are protected under a new
mount option "mb_optimize_scan".
With this patchset, following experiment was performed:
Created a highly fragmented disk of size 65TB. The disk had no
contiguous 2M regions. Following command was run consecutively for 3
times:
time dd if=/dev/urandom of=file bs=2M count=10
Here are the results with and without cr 0/1 optimizations introduced
in this patch:
|---------+------------------------------+---------------------------|
| | Without CR 0/1 Optimizations | With CR 0/1 Optimizations |
|---------+------------------------------+---------------------------|
| 1st run | 5m1.871s | 2m47.642s |
| 2nd run | 2m28.390s | 0m0.611s |
| 3rd run | 2m26.530s | 0m1.255s |
|---------+------------------------------+---------------------------|
Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Andreas Dilger <adilger@dilger.ca>
Link: https://lore.kernel.org/r/20210401172129.189766-6-harshadshirwadkar@gmail.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2021-04-01 17:21:27 +00:00
|
|
|
unsigned int s_mb_max_linear_groups;
|
2009-05-03 20:33:44 +00:00
|
|
|
unsigned int s_mb_stream_request;
|
|
|
|
unsigned int s_mb_max_to_scan;
|
|
|
|
unsigned int s_mb_min_to_scan;
|
|
|
|
unsigned int s_mb_stats;
|
|
|
|
unsigned int s_mb_order2_reqs;
|
|
|
|
unsigned int s_mb_group_prealloc;
|
2012-08-17 13:48:17 +00:00
|
|
|
unsigned int s_max_dir_size_kb;
|
2009-05-03 20:33:44 +00:00
|
|
|
/* where last allocation was done - for stream allocation */
|
|
|
|
unsigned long s_mb_last_group;
|
|
|
|
unsigned long s_mb_last_start;
|
2020-04-21 07:54:07 +00:00
|
|
|
unsigned int s_mb_prefetch;
|
|
|
|
unsigned int s_mb_prefetch_limit;
|
2023-05-30 12:33:50 +00:00
|
|
|
unsigned int s_mb_best_avail_max_trim_order;
|
2009-05-03 20:33:44 +00:00
|
|
|
|
|
|
|
/* stats for buddy allocator */
|
|
|
|
atomic_t s_bal_reqs; /* number of reqs with len > 1 */
|
|
|
|
atomic_t s_bal_success; /* we found long enough chunks */
|
|
|
|
atomic_t s_bal_allocated; /* in blocks */
|
|
|
|
atomic_t s_bal_ex_scanned; /* total extents scanned */
|
2023-05-30 12:33:43 +00:00
|
|
|
atomic_t s_bal_cX_ex_scanned[EXT4_MB_NUM_CRS]; /* total extents scanned */
|
2021-04-01 17:21:25 +00:00
|
|
|
atomic_t s_bal_groups_scanned; /* number of groups scanned */
|
2009-05-03 20:33:44 +00:00
|
|
|
atomic_t s_bal_goals; /* goal hits */
|
2023-05-30 12:33:44 +00:00
|
|
|
atomic_t s_bal_len_goals; /* len goal hits */
|
2009-05-03 20:33:44 +00:00
|
|
|
atomic_t s_bal_breaks; /* too long searches */
|
|
|
|
atomic_t s_bal_2orders; /* 2^order hits */
|
2023-05-30 12:33:50 +00:00
|
|
|
atomic_t s_bal_p2_aligned_bad_suggestions;
|
|
|
|
atomic_t s_bal_goal_fast_bad_suggestions;
|
|
|
|
atomic_t s_bal_best_avail_bad_suggestions;
|
2023-05-30 12:33:42 +00:00
|
|
|
atomic64_t s_bal_cX_groups_considered[EXT4_MB_NUM_CRS];
|
|
|
|
atomic64_t s_bal_cX_hits[EXT4_MB_NUM_CRS];
|
|
|
|
atomic64_t s_bal_cX_failed[EXT4_MB_NUM_CRS]; /* cX loop didn't find blocks */
|
2021-04-01 17:21:23 +00:00
|
|
|
atomic_t s_mb_buddies_generated; /* number of buddies generated */
|
|
|
|
atomic64_t s_mb_generation_time;
|
2009-05-03 20:33:44 +00:00
|
|
|
atomic_t s_mb_lost_chunks;
|
|
|
|
atomic_t s_mb_preallocated;
|
|
|
|
atomic_t s_mb_discarded;
|
ext4: Avoid group preallocation for closed files
Currently the group preallocation code tries to find a large (512)
free block from which to do per-cpu group allocation for small files.
The problem with this scheme is that it leaves the filesystem horribly
fragmented. In the worst case, if the filesystem is unmounted and
remounted (after a system shutdown, for example) we forget the fact
that wee were using a particular (now-partially filled) 512 block
extent. So the next time we try to allocate space for a small file,
we will find *another* completely free 512 block chunk to allocate
small files. Given that there are 32,768 blocks in a block group,
after 64 iterations of "mount, write one 4k file in a directory,
unmount", the block group will have 64 files, each separated by 511
blocks, and the block group will no longer have any free 512
completely free chunks of blocks for group preallocation space.
So if we try to allocate blocks for a file that has been closed, such
that we know the final size of the file, and the filesystem is not
busy, avoid using group preallocation.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2009-09-18 17:34:02 +00:00
|
|
|
atomic_t s_lock_busy;
|
2009-05-03 20:33:44 +00:00
|
|
|
|
|
|
|
/* locality groups */
|
2010-02-02 05:39:01 +00:00
|
|
|
struct ext4_locality_group __percpu *s_locality_groups;
|
2009-05-03 20:33:44 +00:00
|
|
|
|
|
|
|
/* for write statistics */
|
|
|
|
unsigned long s_sectors_written_start;
|
|
|
|
u64 s_kbytes_written;
|
|
|
|
|
2012-08-17 13:54:17 +00:00
|
|
|
/* the size of zero-out chunk */
|
|
|
|
unsigned int s_extent_max_zeroout_kb;
|
|
|
|
|
2009-05-03 20:33:44 +00:00
|
|
|
unsigned int s_log_groups_per_flex;
|
2020-02-19 03:08:51 +00:00
|
|
|
struct flex_groups * __rcu *s_flex_groups;
|
2012-09-05 05:29:50 +00:00
|
|
|
ext4_group_t s_flex_groups_allocated;
|
2009-09-28 19:48:41 +00:00
|
|
|
|
2013-06-04 18:21:02 +00:00
|
|
|
/* workqueue for reserved extent conversions (buffered io) */
|
|
|
|
struct workqueue_struct *rsv_conversion_wq;
|
2010-07-27 15:56:04 +00:00
|
|
|
|
|
|
|
/* timer for periodic error stats printing */
|
|
|
|
struct timer_list s_err_report;
|
ext4: add support for lazy inode table initialization
When the lazy_itable_init extended option is passed to mke2fs, it
considerably speeds up filesystem creation because inode tables are
not zeroed out. The fact that parts of the inode table are
uninitialized is not a problem so long as the block group descriptors,
which contain information regarding how much of the inode table has
been initialized, has not been corrupted However, if the block group
checksums are not valid, e2fsck must scan the entire inode table, and
the the old, uninitialized data could potentially cause e2fsck to
report false problems.
Hence, it is important for the inode tables to be initialized as soon
as possble. This commit adds this feature so that mke2fs can safely
use the lazy inode table initialization feature to speed up formatting
file systems.
This is done via a new new kernel thread called ext4lazyinit, which is
created on demand and destroyed, when it is no longer needed. There
is only one thread for all ext4 filesystems in the system. When the
first filesystem with inititable mount option is mounted, ext4lazyinit
thread is created, then the filesystem can register its request in the
request list.
This thread then walks through the list of requests picking up
scheduled requests and invoking ext4_init_inode_table(). Next schedule
time for the request is computed by multiplying the time it took to
zero out last inode table with wait multiplier, which can be set with
the (init_itable=n) mount option (default is 10). We are doing
this so we do not take the whole I/O bandwidth. When the thread is no
longer necessary (request list is empty) it frees the appropriate
structures and exits (and can be created later later by another
filesystem).
We do not disturb regular inode allocations in any way, it just do not
care whether the inode table is, or is not zeroed. But when zeroing, we
have to skip used inodes, obviously. Also we should prevent new inode
allocations from the group, while zeroing is on the way. For that we
take write alloc_sem lock in ext4_init_inode_table() and read alloc_sem
in the ext4_claim_inode, so when we are unlucky and allocator hits the
group which is currently being zeroed, it just has to wait.
This can be suppresed using the mount option no_init_itable.
Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2010-10-28 01:30:05 +00:00
|
|
|
|
|
|
|
/* Lazy inode table initialization info */
|
|
|
|
struct ext4_li_request *s_li_request;
|
|
|
|
/* Wait multiplier for lazy initialization thread */
|
|
|
|
unsigned int s_li_wait_mult;
|
2011-05-24 22:31:25 +00:00
|
|
|
|
|
|
|
/* Kernel thread for multiple mount protection */
|
|
|
|
struct task_struct *s_mmp_tsk;
|
2011-07-11 04:03:38 +00:00
|
|
|
|
|
|
|
/* record the last minlen when FITRIM is called. */
|
2021-11-03 14:51:21 +00:00
|
|
|
unsigned long s_last_trim_minblks;
|
2012-04-29 22:27:10 +00:00
|
|
|
|
|
|
|
/* Reference to checksum algorithm driver via cryptoapi */
|
|
|
|
struct crypto_shash *s_chksum_driver;
|
2012-04-29 22:29:10 +00:00
|
|
|
|
|
|
|
/* Precomputed FS UUID checksum for seeding other checksums */
|
|
|
|
__u32 s_csum_seed;
|
2013-02-18 05:32:55 +00:00
|
|
|
|
|
|
|
/* Reclaim extents from extent status tree */
|
|
|
|
struct shrinker s_es_shrinker;
|
2014-11-25 16:51:23 +00:00
|
|
|
struct list_head s_es_list; /* List of inodes with reclaimable extents */
|
2014-11-25 16:45:37 +00:00
|
|
|
long s_es_nr_inode;
|
2014-09-02 02:26:49 +00:00
|
|
|
struct ext4_es_stats s_es_stats;
|
2017-06-22 15:28:55 +00:00
|
|
|
struct mb_cache *s_ea_block_cache;
|
2017-06-22 15:44:55 +00:00
|
|
|
struct mb_cache *s_ea_inode_cache;
|
2014-11-25 16:45:37 +00:00
|
|
|
spinlock_t s_es_lock ____cacheline_aligned_in_smp;
|
2013-10-18 01:11:01 +00:00
|
|
|
|
2021-08-16 09:57:04 +00:00
|
|
|
/* Journal triggers for checksum computation */
|
|
|
|
struct ext4_journal_trigger s_journal_triggers[EXT4_JOURNAL_TRIGGER_COUNT];
|
|
|
|
|
2013-10-18 01:11:01 +00:00
|
|
|
/* Ratelimit ext4 messages. */
|
|
|
|
struct ratelimit_state s_err_ratelimit_state;
|
|
|
|
struct ratelimit_state s_warning_ratelimit_state;
|
|
|
|
struct ratelimit_state s_msg_ratelimit_state;
|
2020-07-25 12:33:13 +00:00
|
|
|
atomic_t s_warning_count;
|
|
|
|
atomic_t s_msg_count;
|
2016-04-26 03:22:35 +00:00
|
|
|
|
fscrypt: handle test_dummy_encryption in more logical way
The behavior of the test_dummy_encryption mount option is that when a
new file (or directory or symlink) is created in an unencrypted
directory, it's automatically encrypted using a dummy encryption policy.
That's it; in particular, the encryption (or lack thereof) of existing
files (or directories or symlinks) doesn't change.
Unfortunately the implementation of test_dummy_encryption is a bit weird
and confusing. When test_dummy_encryption is enabled and a file is
being created in an unencrypted directory, we set up an encryption key
(->i_crypt_info) for the directory. This isn't actually used to do any
encryption, however, since the directory is still unencrypted! Instead,
->i_crypt_info is only used for inheriting the encryption policy.
One consequence of this is that the filesystem ends up providing a
"dummy context" (policy + nonce) instead of a "dummy policy". In
commit ed318a6cc0b6 ("fscrypt: support test_dummy_encryption=v2"), I
mistakenly thought this was required. However, actually the nonce only
ends up being used to derive a key that is never used.
Another consequence of this implementation is that it allows for
'inode->i_crypt_info != NULL && !IS_ENCRYPTED(inode)', which is an edge
case that can be forgotten about. For example, currently
FS_IOC_GET_ENCRYPTION_POLICY on an unencrypted directory may return the
dummy encryption policy when the filesystem is mounted with
test_dummy_encryption. That seems like the wrong thing to do, since
again, the directory itself is not actually encrypted.
Therefore, switch to a more logical and maintainable implementation
where the dummy encryption policy inheritance is done without setting up
keys for unencrypted directories. This involves:
- Adding a function fscrypt_policy_to_inherit() which returns the
encryption policy to inherit from a directory. This can be a real
policy, a dummy policy, or no policy.
- Replacing struct fscrypt_dummy_context, ->get_dummy_context(), etc.
with struct fscrypt_dummy_policy, ->get_dummy_policy(), etc.
- Making fscrypt_fname_encrypted_size() take an fscrypt_policy instead
of an inode.
Acked-by: Jaegeuk Kim <jaegeuk@kernel.org>
Acked-by: Jeff Layton <jlayton@kernel.org>
Link: https://lore.kernel.org/r/20200917041136.178600-13-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
2020-09-17 04:11:35 +00:00
|
|
|
/* Encryption policy for '-o test_dummy_encryption' */
|
|
|
|
struct fscrypt_dummy_policy s_dummy_enc_policy;
|
fscrypt: support test_dummy_encryption=v2
v1 encryption policies are deprecated in favor of v2, and some new
features (e.g. encryption+casefolding) are only being added for v2.
Therefore, the "test_dummy_encryption" mount option (which is used for
encryption I/O testing with xfstests) needs to support v2 policies.
To do this, extend its syntax to be "test_dummy_encryption=v1" or
"test_dummy_encryption=v2". The existing "test_dummy_encryption" (no
argument) also continues to be accepted, to specify the default setting
-- currently v1, but the next patch changes it to v2.
To cleanly support both v1 and v2 while also making it easy to support
specifying other encryption settings in the future (say, accepting
"$contents_mode:$filenames_mode:v2"), make ext4 and f2fs maintain a
pointer to the dummy fscrypt_context rather than using mount flags.
To avoid concurrency issues, don't allow test_dummy_encryption to be set
or changed during a remount. (The former restriction is new, but
xfstests doesn't run into it, so no one should notice.)
Tested with 'gce-xfstests -c {ext4,f2fs}/encrypt -g auto'. On ext4,
there are two regressions, both of which are test bugs: ext4/023 and
ext4/028 fail because they set an xattr and expect it to be stored
inline, but the increase in size of the fscrypt_context from
24 to 40 bytes causes this xattr to be spilled into an external block.
Link: https://lore.kernel.org/r/20200512233251.118314-4-ebiggers@kernel.org
Acked-by: Jaegeuk Kim <jaegeuk@kernel.org>
Reviewed-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Eric Biggers <ebiggers@google.com>
2020-05-12 23:32:50 +00:00
|
|
|
|
2020-02-19 18:30:47 +00:00
|
|
|
/*
|
|
|
|
* Barrier between writepages ops and changing any inode's JOURNAL_DATA
|
2023-05-24 07:25:38 +00:00
|
|
|
* or EXTENTS flag or between writepages ops and changing DELALLOC or
|
|
|
|
* DIOREAD_NOLOCK mount options on remount.
|
2020-02-19 18:30:47 +00:00
|
|
|
*/
|
2020-02-19 18:30:46 +00:00
|
|
|
struct percpu_rw_semaphore s_writepages_rwsem;
|
2017-08-24 23:42:48 +00:00
|
|
|
struct dax_device *s_daxdev;
|
2021-11-29 10:21:59 +00:00
|
|
|
u64 s_dax_part_off;
|
2019-11-21 18:09:43 +00:00
|
|
|
#ifdef CONFIG_EXT4_DEBUG
|
|
|
|
unsigned long s_simulate_fail;
|
|
|
|
#endif
|
2020-06-20 02:54:23 +00:00
|
|
|
/* Record the errseq of the backing block device */
|
|
|
|
errseq_t s_bdev_wb_err;
|
|
|
|
spinlock_t s_bdev_wb_lock;
|
2020-10-15 20:37:57 +00:00
|
|
|
|
2020-11-27 11:34:00 +00:00
|
|
|
/* Information about errors that happened during this mount */
|
|
|
|
spinlock_t s_error_lock;
|
|
|
|
int s_add_error_count;
|
|
|
|
int s_first_error_code;
|
|
|
|
__u32 s_first_error_line;
|
|
|
|
__u32 s_first_error_ino;
|
|
|
|
__u64 s_first_error_block;
|
|
|
|
const char *s_first_error_func;
|
|
|
|
time64_t s_first_error_time;
|
|
|
|
int s_last_error_code;
|
|
|
|
__u32 s_last_error_line;
|
|
|
|
__u32 s_last_error_ino;
|
|
|
|
__u64 s_last_error_block;
|
|
|
|
const char *s_last_error_func;
|
|
|
|
time64_t s_last_error_time;
|
|
|
|
/*
|
2023-08-23 03:43:38 +00:00
|
|
|
* If we are in a context where we cannot update the on-disk
|
|
|
|
* superblock, we queue the work here. This is used to update
|
|
|
|
* the error information in the superblock, and for periodic
|
|
|
|
* updates of the superblock called from the commit callback
|
|
|
|
* function.
|
2020-11-27 11:34:00 +00:00
|
|
|
*/
|
2023-08-23 03:43:38 +00:00
|
|
|
struct work_struct s_sb_upd_work;
|
2020-11-27 11:34:00 +00:00
|
|
|
|
2021-12-23 20:21:38 +00:00
|
|
|
/* Ext4 fast commit sub transaction ID */
|
2020-10-15 20:37:57 +00:00
|
|
|
atomic_t s_fc_subtid;
|
2021-12-23 20:21:38 +00:00
|
|
|
|
2020-10-15 20:37:57 +00:00
|
|
|
/*
|
|
|
|
* After commit starts, the main queue gets locked, and the further
|
|
|
|
* updates get added in the staging queue.
|
|
|
|
*/
|
|
|
|
#define FC_Q_MAIN 0
|
|
|
|
#define FC_Q_STAGING 1
|
|
|
|
struct list_head s_fc_q[2]; /* Inodes staged for fast commit
|
|
|
|
* that have data changes in them.
|
|
|
|
*/
|
|
|
|
struct list_head s_fc_dentry_q[2]; /* directory entry updates */
|
|
|
|
unsigned int s_fc_bytes;
|
|
|
|
/*
|
|
|
|
* Main fast commit lock. This lock protects accesses to the
|
|
|
|
* following fields:
|
|
|
|
* ei->i_fc_list, s_fc_dentry_q, s_fc_q, s_fc_bytes, s_fc_bh.
|
|
|
|
*/
|
|
|
|
spinlock_t s_fc_lock;
|
|
|
|
struct buffer_head *s_fc_bh;
|
|
|
|
struct ext4_fc_stats s_fc_stats;
|
2022-01-17 09:36:54 +00:00
|
|
|
tid_t s_fc_ineligible_tid;
|
2020-10-15 20:37:59 +00:00
|
|
|
#ifdef CONFIG_EXT4_DEBUG
|
|
|
|
int s_fc_debug_max_replay;
|
|
|
|
#endif
|
|
|
|
struct ext4_fc_replay_state s_fc_replay_state;
|
2009-05-03 20:33:44 +00:00
|
|
|
};
|
|
|
|
|
2008-09-09 02:25:24 +00:00
|
|
|
/*
 * EXT4_SB - return the ext4-private in-memory superblock info.
 * The VFS stores the ext4_sb_info pointer in sb->s_fs_info at mount
 * time; this is the canonical accessor for it.
 */
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
|
2006-10-11 08:20:50 +00:00
|
|
|
{
|
|
|
|
	return sb->s_fs_info;
|
|
|
|
}
|
2006-10-11 08:20:53 +00:00
|
|
|
/*
 * EXT4_I - map a VFS inode back to its containing ext4_inode_info.
 * The VFS inode is embedded as the vfs_inode member, so this is a
 * simple container_of() with no allocation or locking.
 */
static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
|
2006-10-11 08:20:50 +00:00
|
|
|
{
|
2006-10-11 08:20:53 +00:00
|
|
|
	return container_of(inode, struct ext4_inode_info, vfs_inode);
|
2006-10-11 08:20:50 +00:00
|
|
|
}
|
|
|
|
|
2023-05-04 12:47:23 +00:00
|
|
|
/*
 * ext4_writepages_down_read - enter a writepages operation.
 * Takes s_writepages_rwsem for reading (barrier against remount changes
 * of JOURNAL_DATA/EXTENTS/DELALLOC/DIOREAD_NOLOCK, per the comment on
 * the field) and enters a NOFS allocation scope.
 *
 * Returns the memalloc_nofs_save() cookie; the caller must pass it to
 * ext4_writepages_up_read() to restore the previous allocation mask.
 */
static inline int ext4_writepages_down_read(struct super_block *sb)
|
|
|
|
{
|
|
|
|
	percpu_down_read(&EXT4_SB(sb)->s_writepages_rwsem);
|
|
|
|
	return memalloc_nofs_save();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ext4_writepages_up_read - leave a writepages operation.
 * @ctx: the cookie returned by the matching ext4_writepages_down_read().
 *
 * Restores the allocation mask first, then drops the read side of
 * s_writepages_rwsem (reverse order of acquisition).
 */
static inline void ext4_writepages_up_read(struct super_block *sb, int ctx)
|
|
|
|
{
|
|
|
|
	memalloc_nofs_restore(ctx);
|
|
|
|
	percpu_up_read(&EXT4_SB(sb)->s_writepages_rwsem);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ext4_writepages_down_write - exclude all writepages operations.
 * Write-side counterpart of ext4_writepages_down_read(): takes
 * s_writepages_rwsem exclusively (used when changing mount options or
 * inode flags that writepages depends on) and enters a NOFS scope.
 *
 * Returns the memalloc_nofs_save() cookie for the matching
 * ext4_writepages_up_write().
 */
static inline int ext4_writepages_down_write(struct super_block *sb)
|
|
|
|
{
|
|
|
|
	percpu_down_write(&EXT4_SB(sb)->s_writepages_rwsem);
|
|
|
|
	return memalloc_nofs_save();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ext4_writepages_up_write - release the writepages exclusion.
 * @ctx: the cookie returned by the matching ext4_writepages_down_write().
 *
 * Restores the allocation mask, then drops the write side of
 * s_writepages_rwsem.
 */
static inline void ext4_writepages_up_write(struct super_block *sb, int ctx)
|
|
|
|
{
|
|
|
|
	memalloc_nofs_restore(ctx);
|
|
|
|
	percpu_up_write(&EXT4_SB(sb)->s_writepages_rwsem);
|
|
|
|
}
|
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
/*
 * ext4_valid_inum - sanity-check an on-disk inode number.
 *
 * An inode number is considered valid if it is the root inode or lies
 * in [EXT4_FIRST_INO(sb), s_inodes_count].  Reserved inodes below
 * EXT4_FIRST_INO (other than the root) are rejected.  Returns non-zero
 * if valid, 0 otherwise.
 */
static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
|
2006-10-11 08:20:50 +00:00
|
|
|
{
|
2006-10-11 08:20:53 +00:00
|
|
|
	return ino == EXT4_ROOT_INO ||
|
|
|
|
		(ino >= EXT4_FIRST_INO(sb) &&
|
|
|
|
		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
|
2006-10-11 08:20:50 +00:00
|
|
|
}
|
2010-01-24 19:34:07 +00:00
|
|
|
|
2020-02-15 21:40:37 +00:00
|
|
|
/*
|
|
|
|
* Returns: sbi->field[index]
|
|
|
|
* Used to access an array element from the following sbi fields which require
|
|
|
|
* rcu protection to avoid dereferencing an invalid pointer due to reassignment
|
|
|
|
* - s_group_desc
|
|
|
|
* - s_group_info
|
|
|
|
* - s_flex_group
|
|
|
|
*/
|
|
|
|
#define sbi_array_rcu_deref(sbi, field, index) \
|
|
|
|
({ \
|
|
|
|
typeof(*((sbi)->field)) _v; \
|
|
|
|
rcu_read_lock(); \
|
|
|
|
_v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
|
|
|
|
rcu_read_unlock(); \
|
|
|
|
_v; \
|
|
|
|
})
|
|
|
|
|
2020-11-06 03:59:09 +00:00
|
|
|
/*
|
|
|
|
* run-time mount flags
|
|
|
|
*/
|
|
|
|
enum {
|
|
|
|
EXT4_MF_MNTDIR_SAMPLED,
|
2022-01-17 09:36:55 +00:00
|
|
|
EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */
|
2020-11-06 03:59:09 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * ext4_set_mount_flag - atomically set a run-time mount flag
 * (one of the EXT4_MF_* bits) in sbi->s_mount_flags.
 */
static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
|
|
|
|
{
|
|
|
|
	set_bit(bit, &EXT4_SB(sb)->s_mount_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ext4_clear_mount_flag - atomically clear a run-time mount flag
 * (one of the EXT4_MF_* bits) in sbi->s_mount_flags.
 */
static inline void ext4_clear_mount_flag(struct super_block *sb, int bit)
|
|
|
|
{
|
|
|
|
	clear_bit(bit, &EXT4_SB(sb)->s_mount_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ext4_test_mount_flag - test a run-time mount flag (EXT4_MF_* bit)
 * in sbi->s_mount_flags.  Returns non-zero if the bit is set.
 */
static inline int ext4_test_mount_flag(struct super_block *sb, int bit)
|
|
|
|
{
|
|
|
|
	return test_bit(bit, &EXT4_SB(sb)->s_mount_flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-11-21 18:09:43 +00:00
|
|
|
/*
|
|
|
|
* Simulate_fail codes
|
|
|
|
*/
|
|
|
|
#define EXT4_SIM_BBITMAP_EIO 1
|
|
|
|
#define EXT4_SIM_BBITMAP_CRC 2
|
|
|
|
#define EXT4_SIM_IBITMAP_EIO 3
|
|
|
|
#define EXT4_SIM_IBITMAP_CRC 4
|
|
|
|
#define EXT4_SIM_INODE_EIO 5
|
|
|
|
#define EXT4_SIM_INODE_CRC 6
|
|
|
|
#define EXT4_SIM_DIRBLOCK_EIO 7
|
|
|
|
#define EXT4_SIM_DIRBLOCK_CRC 8
|
|
|
|
|
|
|
|
/*
 * ext4_simulate_fail - debug-only fault injection check.
 * @code: one of the EXT4_SIM_* codes defined above.
 *
 * With CONFIG_EXT4_DEBUG, returns true exactly once when the armed
 * s_simulate_fail value matches @code; the trigger is one-shot (it is
 * cleared before returning true).  Without CONFIG_EXT4_DEBUG this
 * always returns false and compiles away.
 */
static inline bool ext4_simulate_fail(struct super_block *sb,
|
|
|
|
				      unsigned long code)
|
|
|
|
{
|
|
|
|
#ifdef CONFIG_EXT4_DEBUG
|
|
|
|
	struct ext4_sb_info *sbi = EXT4_SB(sb);
|
|
|
|
|
|
|
|
	if (unlikely(sbi->s_simulate_fail == code)) {
|
|
|
|
		sbi->s_simulate_fail = 0;
|
|
|
|
		return true;
|
|
|
|
	}
|
|
|
|
#endif
|
|
|
|
	return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ext4_simulate_fail_bh - fault-inject a buffer_head read failure.
 *
 * If the one-shot simulation code fires (see ext4_simulate_fail()),
 * clears the buffer's uptodate bit so callers see the bh as if the
 * read had failed.  A bh that is an ERR_PTR is left untouched.
 */
static inline void ext4_simulate_fail_bh(struct super_block *sb,
|
|
|
|
					 struct buffer_head *bh,
|
|
|
|
					 unsigned long code)
|
|
|
|
{
|
|
|
|
	if (!IS_ERR(bh) && ext4_simulate_fail(sb, code))
|
|
|
|
		clear_buffer_uptodate(bh);
|
|
|
|
}
|
|
|
|
|
2019-11-20 02:54:15 +00:00
|
|
|
/*
|
|
|
|
* Error number codes for s_{first,last}_error_errno
|
|
|
|
*
|
|
|
|
* Linux errno numbers are architecture specific, so we need to translate
|
|
|
|
* them into something which is architecture independent. We don't define
|
|
|
|
* codes for all errno's; just the ones which are most likely to be the cause
|
|
|
|
* of an ext4_error() call.
|
|
|
|
*/
|
|
|
|
#define EXT4_ERR_UNKNOWN 1
|
|
|
|
#define EXT4_ERR_EIO 2
|
|
|
|
#define EXT4_ERR_ENOMEM 3
|
|
|
|
#define EXT4_ERR_EFSBADCRC 4
|
|
|
|
#define EXT4_ERR_EFSCORRUPTED 5
|
|
|
|
#define EXT4_ERR_ENOSPC 6
|
|
|
|
#define EXT4_ERR_ENOKEY 7
|
|
|
|
#define EXT4_ERR_EROFS 8
|
|
|
|
#define EXT4_ERR_EFBIG 9
|
|
|
|
#define EXT4_ERR_EEXIST 10
|
|
|
|
#define EXT4_ERR_ERANGE 11
|
|
|
|
#define EXT4_ERR_EOVERFLOW 12
|
|
|
|
#define EXT4_ERR_EBUSY 13
|
|
|
|
#define EXT4_ERR_ENOTDIR 14
|
|
|
|
#define EXT4_ERR_ENOTEMPTY 15
|
|
|
|
#define EXT4_ERR_ESHUTDOWN 16
|
|
|
|
#define EXT4_ERR_EFAULT 17
|
|
|
|
|
2010-01-24 19:34:07 +00:00
|
|
|
/*
|
|
|
|
* Inode dynamic state flags
|
|
|
|
*/
|
|
|
|
enum {
|
|
|
|
EXT4_STATE_NEW, /* inode is newly created */
|
|
|
|
EXT4_STATE_XATTR, /* has in-inode xattrs */
|
|
|
|
EXT4_STATE_NO_EXPAND, /* No space for expansion */
|
|
|
|
EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
|
|
|
|
EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
|
2010-05-17 12:00:00 +00:00
|
|
|
EXT4_STATE_NEWENTRY, /* File just added to dir */
|
2012-12-10 19:04:46 +00:00
|
|
|
EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */
|
2013-08-17 02:05:14 +00:00
|
|
|
EXT4_STATE_EXT_PRECACHED, /* extents have been precached */
|
2017-08-24 18:25:02 +00:00
|
|
|
EXT4_STATE_LUSTRE_EA_INODE, /* Lustre-style ea_inode */
|
2019-07-22 16:26:24 +00:00
|
|
|
EXT4_STATE_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */
|
2020-10-15 20:37:57 +00:00
|
|
|
EXT4_STATE_FC_COMMITTING, /* Fast commit ongoing */
|
2021-08-16 09:57:06 +00:00
|
|
|
EXT4_STATE_ORPHAN_FILE, /* Inode orphaned in orphan file */
|
2010-01-24 19:34:07 +00:00
|
|
|
};
|
|
|
|
|
2011-01-10 17:18:25 +00:00
|
|
|
#define EXT4_INODE_BIT_FNS(name, field, offset) \
|
2010-05-17 02:00:00 +00:00
|
|
|
static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
|
|
|
|
{ \
|
2011-01-10 17:18:25 +00:00
|
|
|
return test_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
|
2010-05-17 02:00:00 +00:00
|
|
|
} \
|
|
|
|
static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
|
|
|
|
{ \
|
2011-01-10 17:18:25 +00:00
|
|
|
set_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
|
2010-05-17 02:00:00 +00:00
|
|
|
} \
|
|
|
|
static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
|
|
|
|
{ \
|
2011-01-10 17:18:25 +00:00
|
|
|
clear_bit(bit + (offset), &EXT4_I(inode)->i_##field); \
|
2010-01-24 19:34:07 +00:00
|
|
|
}
|
|
|
|
|
2013-11-12 03:40:40 +00:00
|
|
|
/* Add these declarations here only so that these functions can be
|
|
|
|
* found by name. Otherwise, they are very hard to locate. */
|
|
|
|
static inline int ext4_test_inode_flag(struct inode *inode, int bit);
|
|
|
|
static inline void ext4_set_inode_flag(struct inode *inode, int bit);
|
|
|
|
static inline void ext4_clear_inode_flag(struct inode *inode, int bit);
|
2011-01-10 17:18:25 +00:00
|
|
|
EXT4_INODE_BIT_FNS(flag, flags, 0)
|
2013-11-12 03:40:40 +00:00
|
|
|
|
|
|
|
/* Add these declarations here only so that these functions can be
|
|
|
|
* found by name. Otherwise, they are very hard to locate. */
|
|
|
|
static inline int ext4_test_inode_state(struct inode *inode, int bit);
|
|
|
|
static inline void ext4_set_inode_state(struct inode *inode, int bit);
|
|
|
|
static inline void ext4_clear_inode_state(struct inode *inode, int bit);
|
2011-01-10 17:18:25 +00:00
|
|
|
#if (BITS_PER_LONG < 64)
|
|
|
|
EXT4_INODE_BIT_FNS(state, state_flags, 0)
|
|
|
|
|
|
|
|
static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
|
|
|
|
{
|
|
|
|
(ei)->i_state_flags = 0;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
EXT4_INODE_BIT_FNS(state, flags, 32)
|
|
|
|
|
|
|
|
static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
|
|
|
|
{
|
|
|
|
/* We depend on the fact that callers will set i_flags */
|
|
|
|
}
|
|
|
|
#endif
|
2006-10-11 08:20:50 +00:00
|
|
|
#else
|
2006-10-11 08:20:53 +00:00
|
|
|
/* Assume that user mode programs are passing in an ext4fs superblock, not
|
2006-10-11 08:20:50 +00:00
|
|
|
* a kernel struct super_block. This will allow us to call the feature-test
|
|
|
|
* macros from user land. */
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_SB(sb) (sb)
|
2006-10-11 08:20:50 +00:00
|
|
|
#endif
|
|
|
|
|
2019-07-22 16:26:24 +00:00
|
|
|
/*
 * ext4_verity_in_progress - is fs-verity currently building this
 * inode's Merkle tree?  Tests EXT4_STATE_VERITY_IN_PROGRESS; the
 * IS_ENABLED() check lets the whole test compile away when
 * CONFIG_FS_VERITY is not set.
 */
static inline bool ext4_verity_in_progress(struct inode *inode)
|
|
|
|
{
|
|
|
|
	return IS_ENABLED(CONFIG_FS_VERITY) &&
|
|
|
|
	       ext4_test_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
|
|
|
|
}
|
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Codes for operating systems
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_OS_LINUX 0
|
|
|
|
#define EXT4_OS_HURD 1
|
|
|
|
#define EXT4_OS_MASIX 2
|
|
|
|
#define EXT4_OS_FREEBSD 3
|
|
|
|
#define EXT4_OS_LITES 4
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Revision levels
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_GOOD_OLD_REV 0 /* The good old (original) format */
|
|
|
|
#define EXT4_DYNAMIC_REV 1 /* V2 format w/ dynamic inode sizes */
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_MAX_SUPP_REV EXT4_DYNAMIC_REV
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_GOOD_OLD_INODE_SIZE 128
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2018-01-22 02:04:24 +00:00
|
|
|
#define EXT4_EXTRA_TIMESTAMP_MAX (((s64)1 << 34) - 1 + S32_MIN)
|
|
|
|
#define EXT4_NON_EXTRA_TIMESTAMP_MAX S32_MAX
|
|
|
|
#define EXT4_TIMESTAMP_MIN S32_MIN
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Feature set definitions
|
|
|
|
*/
|
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_FEATURE_COMPAT_DIR_PREALLOC 0x0001
|
|
|
|
#define EXT4_FEATURE_COMPAT_IMAGIC_INODES 0x0002
|
|
|
|
#define EXT4_FEATURE_COMPAT_HAS_JOURNAL 0x0004
|
|
|
|
#define EXT4_FEATURE_COMPAT_EXT_ATTR 0x0008
|
|
|
|
#define EXT4_FEATURE_COMPAT_RESIZE_INODE 0x0010
|
|
|
|
#define EXT4_FEATURE_COMPAT_DIR_INDEX 0x0020
|
2014-05-12 14:16:06 +00:00
|
|
|
#define EXT4_FEATURE_COMPAT_SPARSE_SUPER2 0x0200
|
2020-11-06 03:58:50 +00:00
|
|
|
/*
|
|
|
|
* The reason why "FAST_COMMIT" is a compat feature is that, FS becomes
|
|
|
|
* incompatible only if fast commit blocks are present in the FS. Since we
|
|
|
|
* clear the journal (and thus the fast commit blocks), we don't mark FS as
|
|
|
|
* incompatible. We also have a JBD2 incompat feature, which gets set when
|
|
|
|
* there are fast commit blocks present in the journal.
|
|
|
|
*/
|
2020-10-15 20:37:54 +00:00
|
|
|
#define EXT4_FEATURE_COMPAT_FAST_COMMIT 0x0400
|
2019-10-24 21:54:37 +00:00
|
|
|
#define EXT4_FEATURE_COMPAT_STABLE_INODES 0x0800
|
2021-08-16 09:57:06 +00:00
|
|
|
#define EXT4_FEATURE_COMPAT_ORPHAN_FILE 0x1000 /* Orphan file exists */
|
2006-10-11 08:20:53 +00:00
|
|
|
|
|
|
|
#define EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER 0x0001
|
|
|
|
#define EXT4_FEATURE_RO_COMPAT_LARGE_FILE 0x0002
|
|
|
|
#define EXT4_FEATURE_RO_COMPAT_BTREE_DIR 0x0004
|
2008-01-29 04:58:26 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_HUGE_FILE 0x0008
|
Ext4: Uninitialized Block Groups
In pass1 of e2fsck, every inode table in the filesystem is scanned and checked,
regardless of whether it is in use. This is the most time-consuming part
of the filesystem check. The uninitialized block group feature can greatly
reduce e2fsck time by eliminating checking of uninitialized inodes.
With this feature, there is a high water mark of used inodes for each block
group. Block and inode bitmaps can be uninitialized on disk via a flag in the
group descriptor to avoid reading or scanning them at e2fsck time. A checksum
of each group descriptor is used to ensure that corruption in the group
descriptor's bit flags does not cause incorrect operation.
The feature is enabled through a mkfs option
mke2fs /dev/ -O uninit_groups
A patch adding support for uninitialized block groups to e2fsprogs tools has
been posted to the linux-ext4 mailing list.
The patches have been stress tested with fsstress and fsx. In performance
tests testing e2fsck time, we have seen that e2fsck time on ext3 grows
linearly with the total number of inodes in the filesystem. In ext4 with the
uninitialized block groups feature, the e2fsck time is constant, based
solely on the number of used inodes rather than the total inode count.
Since typical ext4 filesystems only use 1-10% of their inodes, this feature can
greatly reduce e2fsck time for users. With performance improvement of 2-20
times, depending on how full the filesystem is.
The attached graph shows the major improvements in e2fsck times in filesystems
with a large total inode count, but few inodes in use.
In each group descriptor if we have
EXT4_BG_INODE_UNINIT set in bg_flags:
Inode table is not initialized/used in this group. So we can skip
the consistency check during fsck.
EXT4_BG_BLOCK_UNINIT set in bg_flags:
No block in the group is used. So we can skip the block bitmap
verification for this group.
We also add two new fields to group descriptor as a part of
uninitialized group patch.
__le16 bg_itable_unused; /* Unused inodes count */
__le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
bg_itable_unused:
If we have EXT4_BG_INODE_UNINIT not set in bg_flags
then bg_itable_unused will give the offset within
the inode table till the inodes are used. This can be
used by fsck to skip list of inodes that are marked unused.
bg_checksum:
Now that we depend on bg_flags and bg_itable_unused to determine
the block and inode usage, we need to make sure group descriptor
is not corrupt. We add checksum to group descriptor to
detect corruption. If the descriptor is found to be corrupt, we
mark all the blocks and inodes in the group used.
Signed-off-by: Avantika Mathur <mathur@us.ibm.com>
Signed-off-by: Andreas Dilger <adilger@clusterfs.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
2007-10-16 22:38:25 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010
|
2007-07-18 12:38:01 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020
|
2007-07-18 13:15:20 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
|
2011-05-24 23:00:39 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100
|
2011-09-09 22:34:51 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_BIGALLOC 0x0200
|
2012-04-29 22:23:10 +00:00
|
|
|
/*
|
|
|
|
* METADATA_CSUM also enables group descriptor checksums (GDT_CSUM). When
|
|
|
|
* METADATA_CSUM is set, group descriptor checksums use the same algorithm as
|
|
|
|
* all other data structures' checksums. However, the METADATA_CSUM and
|
|
|
|
* GDT_CSUM bits are mutually exclusive.
|
|
|
|
*/
|
2012-01-05 03:01:53 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400
|
2015-02-13 03:31:21 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_READONLY 0x1000
|
2015-10-17 20:15:18 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_PROJECT 0x2000
|
2019-07-22 16:26:24 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_VERITY 0x8000
|
2021-08-16 09:57:06 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_ORPHAN_PRESENT 0x10000 /* Orphan file may be
|
|
|
|
non-empty */
|
2006-10-11 08:20:53 +00:00
|
|
|
|
|
|
|
#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
|
|
|
|
#define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002
|
|
|
|
#define EXT4_FEATURE_INCOMPAT_RECOVER 0x0004 /* Needs recovery */
|
|
|
|
#define EXT4_FEATURE_INCOMPAT_JOURNAL_DEV 0x0008 /* Journal device */
|
|
|
|
#define EXT4_FEATURE_INCOMPAT_META_BG 0x0010
|
2006-10-11 08:21:03 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_EXTENTS 0x0040 /* extents support */
|
2006-10-11 08:21:10 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_64BIT 0x0080
|
2008-01-29 04:58:26 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_MMP 0x0100
|
2007-10-16 22:38:25 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_FLEX_BG 0x0200
|
2010-01-25 08:31:32 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */
|
|
|
|
#define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */
|
2015-10-17 20:16:02 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_CSUM_SEED 0x2000
|
2012-01-05 03:01:53 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */
|
2012-12-10 19:04:46 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */
|
2015-01-19 21:00:58 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000
|
2019-04-25 18:05:42 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_CASEFOLD 0x20000
|
2006-10-11 08:20:53 +00:00
|
|
|
|
2019-02-14 22:52:18 +00:00
|
|
|
extern void ext4_update_dynamic_rev(struct super_block *sb);
|
|
|
|
|
2015-10-17 20:18:43 +00:00
|
|
|
#define EXT4_FEATURE_COMPAT_FUNCS(name, flagname) \
|
|
|
|
static inline bool ext4_has_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
return ((EXT4_SB(sb)->s_es->s_feature_compat & \
|
|
|
|
cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname)) != 0); \
|
|
|
|
} \
|
|
|
|
static inline void ext4_set_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
2019-02-14 22:52:18 +00:00
|
|
|
ext4_update_dynamic_rev(sb); \
|
2015-10-17 20:18:43 +00:00
|
|
|
EXT4_SB(sb)->s_es->s_feature_compat |= \
|
|
|
|
cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \
|
|
|
|
} \
|
|
|
|
static inline void ext4_clear_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
EXT4_SB(sb)->s_es->s_feature_compat &= \
|
|
|
|
~cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \
|
|
|
|
}
|
|
|
|
|
|
|
|
#define EXT4_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
|
|
|
|
static inline bool ext4_has_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
return ((EXT4_SB(sb)->s_es->s_feature_ro_compat & \
|
|
|
|
cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname)) != 0); \
|
|
|
|
} \
|
|
|
|
static inline void ext4_set_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
2019-02-14 22:52:18 +00:00
|
|
|
ext4_update_dynamic_rev(sb); \
|
2015-10-17 20:18:43 +00:00
|
|
|
EXT4_SB(sb)->s_es->s_feature_ro_compat |= \
|
|
|
|
cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \
|
|
|
|
} \
|
|
|
|
static inline void ext4_clear_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
EXT4_SB(sb)->s_es->s_feature_ro_compat &= \
|
|
|
|
~cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \
|
|
|
|
}
|
|
|
|
|
|
|
|
#define EXT4_FEATURE_INCOMPAT_FUNCS(name, flagname) \
|
|
|
|
static inline bool ext4_has_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
return ((EXT4_SB(sb)->s_es->s_feature_incompat & \
|
|
|
|
cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname)) != 0); \
|
|
|
|
} \
|
|
|
|
static inline void ext4_set_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
2019-02-14 22:52:18 +00:00
|
|
|
ext4_update_dynamic_rev(sb); \
|
2015-10-17 20:18:43 +00:00
|
|
|
EXT4_SB(sb)->s_es->s_feature_incompat |= \
|
|
|
|
cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \
|
|
|
|
} \
|
|
|
|
static inline void ext4_clear_feature_##name(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
EXT4_SB(sb)->s_es->s_feature_incompat &= \
|
|
|
|
~cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \
|
|
|
|
}
|
|
|
|
|
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(dir_prealloc, DIR_PREALLOC)
|
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(imagic_inodes, IMAGIC_INODES)
|
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(journal, HAS_JOURNAL)
|
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(xattr, EXT_ATTR)
|
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(resize_inode, RESIZE_INODE)
|
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(dir_index, DIR_INDEX)
|
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(sparse_super2, SPARSE_SUPER2)
|
2020-10-15 20:37:54 +00:00
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(fast_commit, FAST_COMMIT)
|
2019-10-24 21:54:37 +00:00
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(stable_inodes, STABLE_INODES)
|
2021-08-16 09:57:06 +00:00
|
|
|
EXT4_FEATURE_COMPAT_FUNCS(orphan_file, ORPHAN_FILE)
|
2015-10-17 20:18:43 +00:00
|
|
|
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(sparse_super, SPARSE_SUPER)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(large_file, LARGE_FILE)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(btree_dir, BTREE_DIR)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(huge_file, HUGE_FILE)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(gdt_csum, GDT_CSUM)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(dir_nlink, DIR_NLINK)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(extra_isize, EXTRA_ISIZE)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(quota, QUOTA)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(bigalloc, BIGALLOC)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(metadata_csum, METADATA_CSUM)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(readonly, READONLY)
|
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(project, PROJECT)
|
2019-07-22 16:26:24 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(verity, VERITY)
|
2021-08-16 09:57:06 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_FUNCS(orphan_present, ORPHAN_PRESENT)
|
2015-10-17 20:18:43 +00:00
|
|
|
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(compression, COMPRESSION)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(filetype, FILETYPE)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(journal_needs_recovery, RECOVER)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(journal_dev, JOURNAL_DEV)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(meta_bg, META_BG)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(extents, EXTENTS)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(64bit, 64BIT)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(mmp, MMP)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(flex_bg, FLEX_BG)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(ea_inode, EA_INODE)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(dirdata, DIRDATA)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(csum_seed, CSUM_SEED)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(largedir, LARGEDIR)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(inline_data, INLINE_DATA)
|
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(encrypt, ENCRYPT)
|
2019-04-25 18:05:42 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_FUNCS(casefold, CASEFOLD)
|
2015-10-17 20:18:43 +00:00
|
|
|
|
2011-04-18 21:29:14 +00:00
|
|
|
#define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR
|
|
|
|
#define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
|
|
|
|
EXT4_FEATURE_INCOMPAT_META_BG)
|
|
|
|
#define EXT2_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
|
|
|
|
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
|
|
|
|
EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
|
|
|
|
|
|
|
|
#define EXT3_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR
|
|
|
|
#define EXT3_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
|
|
|
|
EXT4_FEATURE_INCOMPAT_RECOVER| \
|
|
|
|
EXT4_FEATURE_INCOMPAT_META_BG)
|
|
|
|
#define EXT3_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
|
|
|
|
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
|
|
|
|
EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
|
|
|
|
|
2021-08-16 09:57:06 +00:00
|
|
|
#define EXT4_FEATURE_COMPAT_SUPP (EXT4_FEATURE_COMPAT_EXT_ATTR| \
|
|
|
|
EXT4_FEATURE_COMPAT_ORPHAN_FILE)
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \
|
|
|
|
EXT4_FEATURE_INCOMPAT_RECOVER| \
|
2006-10-11 08:21:03 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_META_BG| \
|
2006-10-11 08:21:10 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_EXTENTS| \
|
2007-10-16 22:38:25 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_64BIT| \
|
2011-05-24 22:31:25 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_FLEX_BG| \
|
2017-06-22 01:10:32 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_EA_INODE| \
|
2015-04-16 05:56:00 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_MMP | \
|
|
|
|
EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
|
2015-10-17 20:16:02 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_ENCRYPT | \
|
2019-04-25 18:05:42 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_CASEFOLD | \
|
2017-06-22 01:09:57 +00:00
|
|
|
EXT4_FEATURE_INCOMPAT_CSUM_SEED | \
|
|
|
|
EXT4_FEATURE_INCOMPAT_LARGEDIR)
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
|
|
|
|
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
|
Ext4: Uninitialized Block Groups
In pass1 of e2fsck, every inode table in the filesystem is scanned and checked,
regardless of whether it is in use. This is the most time-consuming part
of the filesystem check. The uninitialized block group feature can greatly
reduce e2fsck time by eliminating checking of uninitialized inodes.
With this feature, there is a high water mark of used inodes for each block
group. Block and inode bitmaps can be uninitialized on disk via a flag in the
group descriptor to avoid reading or scanning them at e2fsck time. A checksum
of each group descriptor is used to ensure that corruption in the group
descriptor's bit flags does not cause incorrect operation.
The feature is enabled through a mkfs option
mke2fs /dev/ -O uninit_groups
A patch adding support for uninitialized block groups to e2fsprogs tools has
been posted to the linux-ext4 mailing list.
The patches have been stress tested with fsstress and fsx. In performance
tests testing e2fsck time, we have seen that e2fsck time on ext3 grows
linearly with the total number of inodes in the filesystem. In ext4 with the
uninitialized block groups feature, the e2fsck time is constant, based
solely on the number of used inodes rather than the total inode count.
Since typical ext4 filesystems only use 1-10% of their inodes, this feature can
greatly reduce e2fsck time for users. With performance improvement of 2-20
times, depending on how full the filesystem is.
The attached graph shows the major improvements in e2fsck times in filesystems
with a large total inode count, but few inodes in use.
In each group descriptor if we have
EXT4_BG_INODE_UNINIT set in bg_flags:
Inode table is not initialized/used in this group. So we can skip
the consistency check during fsck.
EXT4_BG_BLOCK_UNINIT set in bg_flags:
No block in the group is used. So we can skip the block bitmap
verification for this group.
We also add two new fields to group descriptor as a part of
uninitialized group patch.
__le16 bg_itable_unused; /* Unused inodes count */
__le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
bg_itable_unused:
If we have EXT4_BG_INODE_UNINIT not set in bg_flags
then bg_itable_unused will give the offset within
the inode table till the inodes are used. This can be
used by fsck to skip list of inodes that are marked unused.
bg_checksum:
Now that we depend on bg_flags and bg_itable_unused to determine
the block and inode usage, we need to make sure group descriptor
is not corrupt. We add checksum to group descriptor to
detect corruption. If the descriptor is found to be corrupt, we
mark all the blocks and inodes in the group used.
Signed-off-by: Avantika Mathur <mathur@us.ibm.com>
Signed-off-by: Andreas Dilger <adilger@clusterfs.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
2007-10-16 22:38:25 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
|
2007-07-18 12:38:01 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_DIR_NLINK | \
|
2007-07-18 13:15:20 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
|
2008-01-29 04:58:26 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\
|
2011-09-09 23:06:51 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\
|
2012-05-27 12:12:42 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_BIGALLOC |\
|
ext4: make quota as first class supported feature
This patch adds support for quotas as a first class feature in ext4;
which is to say, the quota files are stored in hidden inodes as file
system metadata, instead of as separate files visible in the file system
directory hierarchy.
It is based on the proposal at:
https://ext4.wiki.kernel.org/index.php/Design_For_1st_Class_Quota_in_Ext4
This patch introduces a new feature - EXT4_FEATURE_RO_COMPAT_QUOTA
which, when turned on, enables quota accounting at mount time
iteself. Also, the quota inodes are stored in two additional superblock
fields. Some changes introduced by this patch that should be pointed
out are:
1) Two new ext4-superblock fields - s_usr_quota_inum and
s_grp_quota_inum for storing the quota inodes in use.
2) Default quota inodes are: inode#3 for tracking userquota and inode#4
for tracking group quota. The superblock fields can be set to use
other inodes as well.
3) If the QUOTA feature and corresponding quota inodes are set in
superblock, the quota usage tracking is turned on at mount time. On
'quotaon' ioctl, the quota limits enforcement is turned
on. 'quotaoff' ioctl turns off only the limits enforcement in this
case.
4) When QUOTA feature is in use, the quota mount options 'quota',
'usrquota', 'grpquota' are ignored by the kernel.
5) mke2fs or tune2fs can be used to set the QUOTA feature and initialize
quota inodes. The default reserved inodes will not be visible to user
as regular files.
6) The quota-tools will need to be modified to support hidden quota
files on ext4. E2fsprogs will also include support for creating and
fixing quota files.
7) Support is only for the new V2 quota file format.
Tested-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Johann Lombardi <johann@whamcloud.com>
Signed-off-by: Aditya Kali <adityakali@google.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2012-07-23 00:21:31 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM|\
|
2016-01-08 21:01:21 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_QUOTA |\
|
2019-07-22 16:26:24 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_PROJECT |\
|
2021-08-16 09:57:06 +00:00
|
|
|
EXT4_FEATURE_RO_COMPAT_VERITY |\
|
|
|
|
EXT4_FEATURE_RO_COMPAT_ORPHAN_PRESENT)
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2015-10-17 20:18:43 +00:00
|
|
|
#define EXTN_FEATURE_FUNCS(ver) \
|
|
|
|
static inline bool ext4_has_unknown_ext##ver##_compat_features(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
return ((EXT4_SB(sb)->s_es->s_feature_compat & \
|
|
|
|
cpu_to_le32(~EXT##ver##_FEATURE_COMPAT_SUPP)) != 0); \
|
|
|
|
} \
|
|
|
|
static inline bool ext4_has_unknown_ext##ver##_ro_compat_features(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
return ((EXT4_SB(sb)->s_es->s_feature_ro_compat & \
|
|
|
|
cpu_to_le32(~EXT##ver##_FEATURE_RO_COMPAT_SUPP)) != 0); \
|
|
|
|
} \
|
|
|
|
static inline bool ext4_has_unknown_ext##ver##_incompat_features(struct super_block *sb) \
|
|
|
|
{ \
|
|
|
|
return ((EXT4_SB(sb)->s_es->s_feature_incompat & \
|
|
|
|
cpu_to_le32(~EXT##ver##_FEATURE_INCOMPAT_SUPP)) != 0); \
|
|
|
|
}
|
|
|
|
|
|
|
|
EXTN_FEATURE_FUNCS(2)
|
|
|
|
EXTN_FEATURE_FUNCS(3)
|
|
|
|
EXTN_FEATURE_FUNCS(4)
|
|
|
|
|
|
|
|
/*
 * ext4_has_compat_features - true if ANY compat feature bit is set
 * in the on-disk superblock (raw mask test, no per-feature decoding).
 */
static inline bool ext4_has_compat_features(struct super_block *sb)
|
|
|
|
{
|
|
|
|
	return (EXT4_SB(sb)->s_es->s_feature_compat != 0);
|
|
|
|
}
|
|
|
|
static inline bool ext4_has_ro_compat_features(struct super_block *sb)
|
|
|
|
{
|
|
|
|
return (EXT4_SB(sb)->s_es->s_feature_ro_compat != 0);
|
|
|
|
}
|
|
|
|
static inline bool ext4_has_incompat_features(struct super_block *sb)
|
|
|
|
{
|
|
|
|
return (EXT4_SB(sb)->s_es->s_feature_incompat != 0);
|
|
|
|
}
|
|
|
|
|
2021-08-16 09:57:05 +00:00
|
|
|
extern int ext4_feature_set_ok(struct super_block *sb, int readonly);
|
|
|
|
|
2017-02-05 06:27:48 +00:00
|
|
|
/*
|
|
|
|
* Superblock flags
|
|
|
|
*/
|
|
|
|
#define EXT4_FLAGS_RESIZING 0
|
2017-02-05 06:28:48 +00:00
|
|
|
#define EXT4_FLAGS_SHUTDOWN 1
|
2020-05-28 14:59:58 +00:00
|
|
|
#define EXT4_FLAGS_BDEV_IS_DAX 2
|
2017-02-05 06:28:48 +00:00
|
|
|
|
2023-06-16 16:50:49 +00:00
|
|
|
/*
 * True once EXT4_FLAGS_SHUTDOWN has been set in the in-core superblock
 * flags, i.e. the filesystem has been forcibly shut down.
 */
static inline int ext4_forced_shutdown(struct super_block *sb)
{
	return test_bit(EXT4_FLAGS_SHUTDOWN, &EXT4_SB(sb)->s_ext4_flags);
}
|
2017-02-05 06:27:48 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Default values for user and/or group using reserved blocks
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_DEF_RESUID 0
|
|
|
|
#define EXT4_DEF_RESGID 0
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2016-01-08 21:01:21 +00:00
|
|
|
/*
|
|
|
|
* Default project ID
|
|
|
|
*/
|
|
|
|
#define EXT4_DEF_PROJID 0
|
|
|
|
|
2008-10-10 03:53:47 +00:00
|
|
|
#define EXT4_DEF_INODE_READAHEAD_BLKS 32
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Default mount options
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_DEFM_DEBUG 0x0001
|
|
|
|
#define EXT4_DEFM_BSDGROUPS 0x0002
|
|
|
|
#define EXT4_DEFM_XATTR_USER 0x0004
|
|
|
|
#define EXT4_DEFM_ACL 0x0008
|
|
|
|
#define EXT4_DEFM_UID16 0x0010
|
|
|
|
#define EXT4_DEFM_JMODE 0x0060
|
|
|
|
#define EXT4_DEFM_JMODE_DATA 0x0020
|
|
|
|
#define EXT4_DEFM_JMODE_ORDERED 0x0040
|
|
|
|
#define EXT4_DEFM_JMODE_WBACK 0x0060
|
2010-08-02 03:14:20 +00:00
|
|
|
#define EXT4_DEFM_NOBARRIER 0x0100
|
|
|
|
#define EXT4_DEFM_BLOCK_VALIDITY 0x0200
|
|
|
|
#define EXT4_DEFM_DISCARD 0x0400
|
|
|
|
#define EXT4_DEFM_NODELALLOC 0x0800
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2009-01-04 01:27:38 +00:00
|
|
|
/*
|
|
|
|
* Default journal batch times
|
|
|
|
*/
|
|
|
|
#define EXT4_DEF_MIN_BATCH_TIME 0
|
|
|
|
#define EXT4_DEF_MAX_BATCH_TIME 15000 /* 15ms */
|
|
|
|
|
2009-03-12 16:18:34 +00:00
|
|
|
/*
|
|
|
|
* Minimum number of groups in a flexgroup before we separate out
|
|
|
|
* directories into the first block group of a flexgroup
|
|
|
|
*/
|
|
|
|
#define EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME 4
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Structure of a directory entry
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_NAME_LEN 255
|
ext4: fix use-after-free in ext4_search_dir
We got issue as follows:
EXT4-fs (loop0): mounted filesystem without journal. Opts: ,errors=continue
==================================================================
BUG: KASAN: use-after-free in ext4_search_dir fs/ext4/namei.c:1394 [inline]
BUG: KASAN: use-after-free in search_dirblock fs/ext4/namei.c:1199 [inline]
BUG: KASAN: use-after-free in __ext4_find_entry+0xdca/0x1210 fs/ext4/namei.c:1553
Read of size 1 at addr ffff8881317c3005 by task syz-executor117/2331
CPU: 1 PID: 2331 Comm: syz-executor117 Not tainted 5.10.0+ #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014
Call Trace:
__dump_stack lib/dump_stack.c:83 [inline]
dump_stack+0x144/0x187 lib/dump_stack.c:124
print_address_description+0x7d/0x630 mm/kasan/report.c:387
__kasan_report+0x132/0x190 mm/kasan/report.c:547
kasan_report+0x47/0x60 mm/kasan/report.c:564
ext4_search_dir fs/ext4/namei.c:1394 [inline]
search_dirblock fs/ext4/namei.c:1199 [inline]
__ext4_find_entry+0xdca/0x1210 fs/ext4/namei.c:1553
ext4_lookup_entry fs/ext4/namei.c:1622 [inline]
ext4_lookup+0xb8/0x3a0 fs/ext4/namei.c:1690
__lookup_hash+0xc5/0x190 fs/namei.c:1451
do_rmdir+0x19e/0x310 fs/namei.c:3760
do_syscall_64+0x33/0x40 arch/x86/entry/common.c:46
entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x445e59
Code: 4d c7 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 1b c7 fb ff c3 66 2e 0f 1f 84 00 00 00 00
RSP: 002b:00007fff2277fac8 EFLAGS: 00000246 ORIG_RAX: 0000000000000054
RAX: ffffffffffffffda RBX: 0000000000400280 RCX: 0000000000445e59
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 00000000200000c0
RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000002
R10: 00007fff2277f990 R11: 0000000000000246 R12: 0000000000000000
R13: 431bde82d7b634db R14: 0000000000000000 R15: 0000000000000000
The buggy address belongs to the page:
page:0000000048cd3304 refcount:0 mapcount:0 mapping:0000000000000000 index:0x1 pfn:0x1317c3
flags: 0x200000000000000()
raw: 0200000000000000 ffffea0004526588 ffffea0004528088 0000000000000000
raw: 0000000000000001 0000000000000000 00000000ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8881317c2f00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
ffff8881317c2f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
>ffff8881317c3000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
^
ffff8881317c3080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
ffff8881317c3100: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff
==================================================================
ext4_search_dir:
...
de = (struct ext4_dir_entry_2 *)search_buf;
dlimit = search_buf + buf_size;
while ((char *) de < dlimit) {
...
if ((char *) de + de->name_len <= dlimit &&
ext4_match(dir, fname, de)) {
...
}
...
de_len = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize);
if (de_len <= 0)
return -1;
offset += de_len;
de = (struct ext4_dir_entry_2 *) ((char *) de + de_len);
}
Assume:
de=0xffff8881317c2fff
dlimit=0x0xffff8881317c3000
If read 'de->name_len' which address is 0xffff8881317c3005, obviously is
out of range, then will trigger use-after-free.
To solve this issue, 'dlimit' must reserve 8 bytes, as we will read
'de->name_len' to judge if '(char *) de + de->name_len' out of range.
Signed-off-by: Ye Bin <yebin10@huawei.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220324064816.1209985-1-yebin10@huawei.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@kernel.org
2022-03-24 06:48:16 +00:00
|
|
|
/*
|
|
|
|
* Base length of the ext4 directory entry excluding the name length
|
|
|
|
*/
|
|
|
|
#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
/*
 * Original directory entry layout: 16-bit name_len and no file_type byte
 * (contrast with ext4_dir_entry_2 below, which reclaims the high byte).
 */
struct ext4_dir_entry {
	__le32	inode;			/* Inode number */
	__le16	rec_len;		/* Directory entry length */
	__le16	name_len;		/* Name length */
	char	name[EXT4_NAME_LEN];	/* File name */
};
|
|
|
|
|
2021-03-19 07:34:13 +00:00
|
|
|
|
|
|
|
/*
 * Encrypted Casefolded entries require saving the hash on disk. This structure
 * follows ext4_dir_entry_2's name[name_len] at the next 4 byte aligned
 * boundary (see EXT4_DIRENT_HASHES below).
 */
struct ext4_dir_entry_hash {
	__le32 hash;		/* major hash of the name */
	__le32 minor_hash;	/* minor (secondary) hash of the name */
};
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
2006-10-11 08:20:53 +00:00
|
|
|
* The new version of the directory entry. Since EXT4 structures are
|
2006-10-11 08:20:50 +00:00
|
|
|
* stored in intel byte order, and the name_len field could never be
|
|
|
|
* bigger than 255 chars, it's safe to reclaim the extra byte for the
|
|
|
|
* file_type field.
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
struct ext4_dir_entry_2 {
	__le32	inode;			/* Inode number */
	__le16	rec_len;		/* Directory entry length */
	__u8	name_len;		/* Name length (8 bits; max 255) */
	__u8	file_type;		/* See file type macros EXT4_FT_* below */
	char	name[EXT4_NAME_LEN];	/* File name */
};
|
|
|
|
|
2021-03-19 07:34:13 +00:00
|
|
|
/*
|
|
|
|
* Access the hashes at the end of ext4_dir_entry_2
|
|
|
|
*/
|
|
|
|
#define EXT4_DIRENT_HASHES(entry) \
|
|
|
|
((struct ext4_dir_entry_hash *) \
|
|
|
|
(((void *)(entry)) + \
|
|
|
|
((8 + (entry)->name_len + EXT4_DIR_ROUND) & ~EXT4_DIR_ROUND)))
|
|
|
|
#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(de)->hash)
|
|
|
|
#define EXT4_DIRENT_MINOR_HASH(entry) \
|
|
|
|
le32_to_cpu(EXT4_DIRENT_HASHES(de)->minor_hash)
|
|
|
|
|
|
|
|
static inline bool ext4_hash_in_dirent(const struct inode *inode)
|
|
|
|
{
|
|
|
|
return IS_CASEFOLDED(inode) && IS_ENCRYPTED(inode);
|
|
|
|
}
|
|
|
|
|
2012-04-29 22:23:10 +00:00
|
|
|
/*
 * This is a bogus directory entry at the end of each leaf block that
 * records checksums.  The zero inode and zero name length make it parse
 * as an unused 12-byte entry.
 */
struct ext4_dir_entry_tail {
	__le32	det_reserved_zero1;	/* Pretend to be unused */
	__le16	det_rec_len;		/* 12 */
	__u8	det_reserved_zero2;	/* Zero name length */
	__u8	det_reserved_ft;	/* 0xDE, fake file type */
	__le32	det_checksum;		/* crc32c(uuid+inum+dirblock) */
};
|
|
|
|
|
2012-12-10 19:05:59 +00:00
|
|
|
#define EXT4_DIRENT_TAIL(block, blocksize) \
|
|
|
|
((struct ext4_dir_entry_tail *)(((void *)(block)) + \
|
|
|
|
((blocksize) - \
|
|
|
|
sizeof(struct ext4_dir_entry_tail))))
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
2006-10-11 08:20:53 +00:00
|
|
|
* Ext4 directory file types. Only the low 3 bits are used. The
|
2006-10-11 08:20:50 +00:00
|
|
|
* other bits are reserved for now.
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_FT_UNKNOWN 0
|
|
|
|
#define EXT4_FT_REG_FILE 1
|
|
|
|
#define EXT4_FT_DIR 2
|
|
|
|
#define EXT4_FT_CHRDEV 3
|
|
|
|
#define EXT4_FT_BLKDEV 4
|
|
|
|
#define EXT4_FT_FIFO 5
|
|
|
|
#define EXT4_FT_SOCK 6
|
|
|
|
#define EXT4_FT_SYMLINK 7
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_FT_MAX 8
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2012-04-29 22:23:10 +00:00
|
|
|
#define EXT4_FT_DIR_CSUM 0xDE
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
2006-10-11 08:20:53 +00:00
|
|
|
* EXT4_DIR_PAD defines the directory entries boundaries
|
2006-10-11 08:20:50 +00:00
|
|
|
*
|
|
|
|
* NOTE: It must be a multiple of 4
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_DIR_PAD 4
|
|
|
|
#define EXT4_DIR_ROUND (EXT4_DIR_PAD - 1)
|
2008-01-29 04:58:27 +00:00
|
|
|
#define EXT4_MAX_REC_LEN ((1<<16)-1)
|
|
|
|
|
2021-03-19 07:34:13 +00:00
|
|
|
/*
 * The rec_len is dependent on the type of directory. Directories that are
 * casefolded and encrypted need to store the hash as well, so we add room for
 * ext4_extended_dir_entry_2. For all entries related to '.' or '..' you should
 * pass NULL for dir, as those entries do not use the extra fields.
 */
static inline unsigned int ext4_dir_rec_len(__u8 name_len,
						const struct inode *dir)
{
	/* 8 bytes of fixed dirent header + name, rounded up to 4 bytes */
	int rec_len = (name_len + 8 + EXT4_DIR_ROUND);

	/* encrypted+casefolded dirs append the on-disk hash pair */
	if (dir && ext4_hash_in_dirent(dir))
		rec_len += sizeof(struct ext4_dir_entry_hash);
	return (rec_len & ~EXT4_DIR_ROUND);
}
|
|
|
|
|
2010-08-05 05:46:37 +00:00
|
|
|
/*
|
|
|
|
* If we ever get support for fs block sizes > page_size, we'll need
|
|
|
|
* to remove the #if statements in the next two functions...
|
|
|
|
*/
|
|
|
|
/*
 * Decode an on-disk dirent rec_len.  When PAGE_SIZE >= 64KiB the block
 * size may exceed what 16 bits can represent: the special stored values
 * 0 and EXT4_MAX_REC_LEN mean "the whole block", and bits 16-17 of the
 * real length are carried in the low two bits of the stored value.
 */
static inline unsigned int
ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize)
{
	unsigned len = le16_to_cpu(dlen);

#if (PAGE_SIZE >= 65536)
	if (len == EXT4_MAX_REC_LEN || len == 0)
		return blocksize;
	/* recombine: low 16 bits (4-byte aligned) + top bits from bits 0-1 */
	return (len & 65532) | ((len & 3) << 16);
#else
	return len;
#endif
}
|
|
|
|
|
|
|
|
/*
 * Encode a dirent rec_len for disk — inverse of ext4_rec_len_from_disk().
 * @len must be 4-byte aligned, no larger than @blocksize, and the block
 * size itself must not exceed 256KiB (1 << 18).
 */
static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
{
	BUG_ON((len > blocksize) || (blocksize > (1 << 18)) || (len & 3));
#if (PAGE_SIZE >= 65536)
	if (len < 65536)
		return cpu_to_le16(len);
	/* a full-block entry is stored as EXT4_MAX_REC_LEN (64KiB) or 0 */
	if (len == blocksize) {
		if (blocksize == 65536)
			return cpu_to_le16(EXT4_MAX_REC_LEN);
		else
			return cpu_to_le16(0);
	}
	/* stash bits 16-17 of the length in the low two (alignment) bits */
	return cpu_to_le16((len & 65532) | ((len >> 16) & 3));
#else
	return cpu_to_le16(len);
#endif
}
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Hash Tree Directory indexing
|
|
|
|
* (c) Daniel Phillips, 2001
|
|
|
|
*/
|
|
|
|
|
2015-10-17 20:18:43 +00:00
|
|
|
#define is_dx(dir) (ext4_has_feature_dir_index((dir)->i_sb) && \
|
2010-05-17 02:00:00 +00:00
|
|
|
ext4_test_inode_flag((dir), EXT4_INODE_INDEX))
|
2017-08-05 23:47:34 +00:00
|
|
|
#define EXT4_DIR_LINK_MAX(dir) unlikely((dir)->i_nlink >= EXT4_LINK_MAX && \
|
|
|
|
!(ext4_has_feature_dir_nlink((dir)->i_sb) && is_dx(dir)))
|
2006-10-11 08:20:53 +00:00
|
|
|
#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/* Legal values for the dx_root hash_version field: */
|
|
|
|
|
2018-01-08 04:36:19 +00:00
|
|
|
#define DX_HASH_LEGACY 0
|
|
|
|
#define DX_HASH_HALF_MD4 1
|
|
|
|
#define DX_HASH_TEA 2
|
|
|
|
#define DX_HASH_LEGACY_UNSIGNED 3
|
2008-10-28 17:21:44 +00:00
|
|
|
#define DX_HASH_HALF_MD4_UNSIGNED 4
|
|
|
|
#define DX_HASH_TEA_UNSIGNED 5
|
2021-03-19 07:34:13 +00:00
|
|
|
#define DX_HASH_SIPHASH 6
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2012-04-29 22:27:10 +00:00
|
|
|
/*
 * Continue a checksum computation: fold @length bytes at @address into the
 * running value @crc using the filesystem's checksum transform
 * (sbi->s_chksum_driver) and return the updated value.
 */
static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];	/* holds the 32-bit checksum state */
	} desc;

	/* the driver's context must be exactly the 4-byte value seeded below */
	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	*(u32 *)desc.ctx = crc;

	/* crypto_shash_update() returns 0 on success; failure is fatal here */
	BUG_ON(crypto_shash_update(&desc.shash, address, length));

	return *(u32 *)desc.ctx;
}
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
#ifdef __KERNEL__
|
|
|
|
|
|
|
|
/* hash info structure used by the directory hash */
struct dx_hash_info
{
	u32		hash;		/* primary hash value */
	u32		minor_hash;	/* secondary hash value */
	int		hash_version;	/* one of the DX_HASH_* selectors */
	u32		*seed;		/* optional seed for the hash function */
};
|
|
|
|
|
2012-03-19 02:44:40 +00:00
|
|
|
|
|
|
|
/* 32 and 64 bit signed EOF for dx directories */
|
|
|
|
#define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
|
|
|
|
#define EXT4_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/*
|
2006-10-11 08:20:53 +00:00
|
|
|
* Control parameters used by ext4_htree_next_block
|
2006-10-11 08:20:50 +00:00
|
|
|
*/
|
|
|
|
#define HASH_NB_ALWAYS 1
|
|
|
|
|
2015-05-18 17:14:47 +00:00
|
|
|
/*
 * A directory entry name in the forms needed during lookup/insert: the
 * user-supplied name, the on-disk form (accessed via fname_name()), and
 * its directory-hash state.
 */
struct ext4_filename {
	const struct qstr *usr_fname;	/* name as supplied by the caller */
	struct fscrypt_str disk_name;	/* name as stored on disk */
	struct dx_hash_info hinfo;	/* hash state for this name */
#ifdef CONFIG_FS_ENCRYPTION
	struct fscrypt_str crypto_buf;	/* buffer backing the encrypted form —
					 * NOTE(review): presumably what
					 * disk_name points into; confirm */
#endif
#if IS_ENABLED(CONFIG_UNICODE)
	struct fscrypt_str cf_name;	/* casefolded form of the name */
#endif
};
|
|
|
|
|
|
|
|
#define fname_name(p) ((p)->disk_name.name)
|
2021-03-19 07:34:13 +00:00
|
|
|
#define fname_usr_name(p) ((p)->usr_fname->name)
|
2015-05-18 17:14:47 +00:00
|
|
|
#define fname_len(p) ((p)->disk_name.len)
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Describe an inode's exact location on disk and in memory
|
|
|
|
*/
|
2006-10-11 08:20:53 +00:00
|
|
|
struct ext4_iloc
{
	struct buffer_head *bh;		/* buffer holding the inode's block */
	unsigned long offset;		/* byte offset of the raw inode in bh */
	ext4_group_t block_group;	/* block group containing the inode */
};
|
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
static inline struct ext4_inode *ext4_raw_inode(struct ext4_iloc *iloc)
|
2006-10-11 08:20:50 +00:00
|
|
|
{
|
2006-10-11 08:20:53 +00:00
|
|
|
return (struct ext4_inode *) (iloc->bh->b_data + iloc->offset);
|
2006-10-11 08:20:50 +00:00
|
|
|
}
|
|
|
|
|
2017-06-22 15:44:55 +00:00
|
|
|
static inline bool ext4_is_quota_file(struct inode *inode)
|
|
|
|
{
|
|
|
|
return IS_NOQUOTA(inode) &&
|
|
|
|
!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL);
|
|
|
|
}
|
2017-06-22 15:31:25 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
 * This structure is stuffed into the struct file's private_data field
 * for directories.  It is where we put information so that we can do
 * readdir operations in hash tree order.
 */
struct dir_private_info {
	struct rb_root	root;		/* red-black tree of cached entries */
	struct rb_node	*curr_node;	/* current node in the traversal */
	struct fname	*extra_fname;	/* remaining names for the current hash
					 * — NOTE(review): confirm against the
					 * fname chaining in dir.c */
	loff_t		last_pos;	/* file position at the last readdir */
	__u32		curr_hash;	/* hash of the current entry */
	__u32		curr_minor_hash; /* minor hash of the current entry */
	__u32		next_hash;	/* hash to resume from when refilling */
};
|
|
|
|
|
|
|
|
/* calculate the first block number of the group */
|
2006-10-11 08:20:53 +00:00
|
|
|
static inline ext4_fsblk_t
|
2008-01-29 04:58:27 +00:00
|
|
|
ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
|
2006-10-11 08:20:50 +00:00
|
|
|
{
|
2006-10-11 08:20:53 +00:00
|
|
|
return group_no * (ext4_fsblk_t)EXT4_BLOCKS_PER_GROUP(sb) +
|
|
|
|
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
|
2006-10-11 08:20:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Special error return code only used by dx_probe() and its callers.
|
|
|
|
*/
|
2014-08-23 21:47:19 +00:00
|
|
|
#define ERR_BAD_DX_DIR (-(MAX_ERRNO - 1))
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2017-06-22 01:09:57 +00:00
|
|
|
/* htree levels for ext4 */
|
|
|
|
#define EXT4_HTREE_LEVEL_COMPAT 2
|
|
|
|
#define EXT4_HTREE_LEVEL 3
|
|
|
|
|
|
|
|
static inline int ext4_dir_htree_level(struct super_block *sb)
|
|
|
|
{
|
|
|
|
return ext4_has_feature_largedir(sb) ?
|
|
|
|
EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
|
|
|
|
}
|
|
|
|
|
ext4: add support for lazy inode table initialization
When the lazy_itable_init extended option is passed to mke2fs, it
considerably speeds up filesystem creation because inode tables are
not zeroed out. The fact that parts of the inode table are
uninitialized is not a problem so long as the block group descriptors,
which contain information regarding how much of the inode table has
been initialized, has not been corrupted However, if the block group
checksums are not valid, e2fsck must scan the entire inode table, and
the the old, uninitialized data could potentially cause e2fsck to
report false problems.
Hence, it is important for the inode tables to be initialized as soon
as possble. This commit adds this feature so that mke2fs can safely
use the lazy inode table initialization feature to speed up formatting
file systems.
This is done via a new new kernel thread called ext4lazyinit, which is
created on demand and destroyed, when it is no longer needed. There
is only one thread for all ext4 filesystems in the system. When the
first filesystem with inititable mount option is mounted, ext4lazyinit
thread is created, then the filesystem can register its request in the
request list.
This thread then walks through the list of requests picking up
scheduled requests and invoking ext4_init_inode_table(). Next schedule
time for the request is computed by multiplying the time it took to
zero out last inode table with wait multiplier, which can be set with
the (init_itable=n) mount option (default is 10). We are doing
this so we do not take the whole I/O bandwidth. When the thread is no
longer necessary (request list is empty) it frees the appropriate
structures and exits (and can be created later later by another
filesystem).
We do not disturb regular inode allocations in any way, it just do not
care whether the inode table is, or is not zeroed. But when zeroing, we
have to skip used inodes, obviously. Also we should prevent new inode
allocations from the group, while zeroing is on the way. For that we
take write alloc_sem lock in ext4_init_inode_table() and read alloc_sem
in the ext4_claim_inode, so when we are unlucky and allocator hits the
group which is currently being zeroed, it just has to wait.
This can be suppresed using the mount option no_init_itable.
Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2010-10-28 01:30:05 +00:00
|
|
|
/*
|
|
|
|
* Timeout and state flag for lazy initialization inode thread.
|
|
|
|
*/
|
|
|
|
#define EXT4_DEF_LI_WAIT_MULT 10
|
|
|
|
#define EXT4_DEF_LI_MAX_START_DELAY 5
|
|
|
|
#define EXT4_LAZYINIT_QUIT 0x0001
|
|
|
|
#define EXT4_LAZYINIT_RUNNING 0x0002
|
|
|
|
|
|
|
|
/*
 * Lazy inode table initialization info: state shared by every filesystem
 * registered with the single system-wide ext4lazyinit thread.
 */
struct ext4_lazy_init {
	unsigned long		li_state;	/* EXT4_LAZYINIT_* state flags */
	struct list_head	li_request_list; /* queued ext4_li_request items */
	struct mutex		li_list_mtx;	/* protects li_request_list */
};
|
|
|
|
|
2020-07-17 04:14:40 +00:00
|
|
|
/* Kind of background work an ext4_li_request performs. */
enum ext4_li_mode {
	EXT4_LI_MODE_PREFETCH_BBITMAP,	/* prefetch block bitmaps */
	EXT4_LI_MODE_ITABLE,		/* zero out uninitialized inode tables */
};
|
|
|
|
|
ext4: add support for lazy inode table initialization
When the lazy_itable_init extended option is passed to mke2fs, it
considerably speeds up filesystem creation because inode tables are
not zeroed out. The fact that parts of the inode table are
uninitialized is not a problem so long as the block group descriptors,
which contain information regarding how much of the inode table has
been initialized, has not been corrupted However, if the block group
checksums are not valid, e2fsck must scan the entire inode table, and
the the old, uninitialized data could potentially cause e2fsck to
report false problems.
Hence, it is important for the inode tables to be initialized as soon
as possble. This commit adds this feature so that mke2fs can safely
use the lazy inode table initialization feature to speed up formatting
file systems.
This is done via a new new kernel thread called ext4lazyinit, which is
created on demand and destroyed, when it is no longer needed. There
is only one thread for all ext4 filesystems in the system. When the
first filesystem with inititable mount option is mounted, ext4lazyinit
thread is created, then the filesystem can register its request in the
request list.
This thread then walks through the list of requests picking up
scheduled requests and invoking ext4_init_inode_table(). Next schedule
time for the request is computed by multiplying the time it took to
zero out last inode table with wait multiplier, which can be set with
the (init_itable=n) mount option (default is 10). We are doing
this so we do not take the whole I/O bandwidth. When the thread is no
longer necessary (request list is empty) it frees the appropriate
structures and exits (and can be created later later by another
filesystem).
We do not disturb regular inode allocations in any way, it just do not
care whether the inode table is, or is not zeroed. But when zeroing, we
have to skip used inodes, obviously. Also we should prevent new inode
allocations from the group, while zeroing is on the way. For that we
take write alloc_sem lock in ext4_init_inode_table() and read alloc_sem
in the ext4_claim_inode, so when we are unlucky and allocator hits the
group which is currently being zeroed, it just has to wait.
This can be suppresed using the mount option no_init_itable.
Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2010-10-28 01:30:05 +00:00
|
|
|
/* One lazy-init work item, linked on ext4_lazy_init.li_request_list. */
struct ext4_li_request {
	struct super_block	*lr_super;	/* filesystem to work on */
	enum ext4_li_mode	lr_mode;	/* itable zeroing or bitmap prefetch */
	ext4_group_t		lr_first_not_zeroed;	/* first group whose inode
							 * table is not yet zeroed */
	ext4_group_t		lr_next_group;	/* next group to process */
	struct list_head	lr_request;	/* list linkage */
	unsigned long		lr_next_sched;	/* time of next scheduled run —
						 * presumably jiffies; confirm */
	unsigned long		lr_timeout;	/* delay between runs */
};
|
|
|
|
|
2010-10-28 01:30:05 +00:00
|
|
|
/*
 * Kobject wrapper used to publish supported ext4 features (presumably via
 * sysfs — confirm in sysfs.c); f_kobj_unregister is completed on release.
 */
struct ext4_features {
	struct kobject f_kobj;
	struct completion f_kobj_unregister;
};
|
|
|
|
|
2011-05-24 22:31:25 +00:00
|
|
|
/*
|
|
|
|
* This structure will be used for multiple mount protection. It will be
|
|
|
|
* written into the block number saved in the s_mmp_block field in the
|
|
|
|
* superblock. Programs that check MMP should assume that if
|
|
|
|
* SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
|
|
|
|
* to use the filesystem, regardless of how old the timestamp is.
|
|
|
|
*/
|
|
|
|
#define EXT4_MMP_MAGIC 0x004D4D50U /* ASCII for MMP */
|
|
|
|
#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
|
|
|
|
#define EXT4_MMP_SEQ_FSCK 0xE24D4D50U /* mmp_seq value when being fscked */
|
|
|
|
#define EXT4_MMP_SEQ_MAX 0xE24D4D4FU /* maximum valid mmp_seq value */
|
|
|
|
|
|
|
|
struct mmp_struct {
	__le32	mmp_magic;		/* Magic number for MMP (EXT4_MMP_MAGIC) */
	__le32	mmp_seq;		/* Sequence no. updated periodically */

	/*
	 * mmp_time, mmp_nodename & mmp_bdevname are only used for information
	 * purposes and do not affect the correctness of the algorithm
	 */
	__le64	mmp_time;		/* Time last updated */
	char	mmp_nodename[64];	/* Node which last updated MMP block */
	char	mmp_bdevname[32];	/* Bdev which last updated MMP block */

	/*
	 * mmp_check_interval is used to verify if the MMP block has been
	 * updated on the block device. The value is updated based on the
	 * maximum time to write the MMP block during an update cycle.
	 */
	__le16	mmp_check_interval;

	__le16	mmp_pad1;		/* unused padding */
	__le32	mmp_pad2[226];		/* pads the structure to 1024 bytes */
	__le32	mmp_checksum;		/* crc32c(uuid+mmp_block) */
};
|
|
|
|
|
|
|
|
/* arguments passed to the mmp thread */
struct mmpd_data {
	struct buffer_head *bh; /* bh from initial read_mmp_block() */
	struct super_block *sb; /* super block of the fs */
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check interval multiplier
|
|
|
|
* The MMP block is written every update interval and initially checked every
|
|
|
|
* update interval x the multiplier (the value is then adapted based on the
|
|
|
|
* write latency). The reason is that writes can be delayed under load and we
|
|
|
|
* don't want readers to incorrectly assume that the filesystem is no longer
|
|
|
|
* in use.
|
|
|
|
*/
|
|
|
|
#define EXT4_MMP_CHECK_MULT 2UL
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Minimum interval for MMP checking in seconds.
|
|
|
|
*/
|
|
|
|
#define EXT4_MMP_MIN_CHECK_INTERVAL 5UL
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Maximum interval for MMP checking in seconds.
|
|
|
|
*/
|
|
|
|
#define EXT4_MMP_MAX_CHECK_INTERVAL 300UL
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Function prototypes
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ok, these declarations are also in <linux/kernel.h> but none of the
|
2006-10-11 08:20:53 +00:00
|
|
|
* ext4 source programs needs to include it so they are duplicated here.
|
2006-10-11 08:20:50 +00:00
|
|
|
*/
|
2007-05-24 17:04:54 +00:00
|
|
|
# define NORET_TYPE /**/
|
|
|
|
# define ATTRIB_NORET __attribute__((noreturn))
|
|
|
|
# define NORET_AND noreturn,
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2008-11-05 05:14:04 +00:00
|
|
|
/* bitmap.c */
|
2012-06-30 23:14:57 +00:00
|
|
|
extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
|
2023-02-21 20:30:25 +00:00
|
|
|
void ext4_inode_bitmap_csum_set(struct super_block *sb,
|
2012-04-29 22:33:10 +00:00
|
|
|
struct ext4_group_desc *gdp,
|
|
|
|
struct buffer_head *bh, int sz);
|
2023-02-21 20:30:24 +00:00
|
|
|
int ext4_inode_bitmap_csum_verify(struct super_block *sb,
|
2012-04-29 22:33:10 +00:00
|
|
|
struct ext4_group_desc *gdp,
|
|
|
|
struct buffer_head *bh, int sz);
|
2023-02-21 20:30:27 +00:00
|
|
|
void ext4_block_bitmap_csum_set(struct super_block *sb,
|
2012-04-29 22:35:10 +00:00
|
|
|
struct ext4_group_desc *gdp,
|
2012-10-22 04:34:32 +00:00
|
|
|
struct buffer_head *bh);
|
2023-02-21 20:30:26 +00:00
|
|
|
int ext4_block_bitmap_csum_verify(struct super_block *sb,
|
2012-04-29 22:35:10 +00:00
|
|
|
struct ext4_group_desc *gdp,
|
2012-10-22 04:34:32 +00:00
|
|
|
struct buffer_head *bh);
|
2008-11-05 05:14:04 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/* balloc.c */
|
2013-04-04 03:32:34 +00:00
|
|
|
extern void ext4_get_group_no_and_offset(struct super_block *sb,
|
|
|
|
ext4_fsblk_t blocknr,
|
|
|
|
ext4_group_t *blockgrpp,
|
|
|
|
ext4_grpblk_t *offsetp);
|
|
|
|
extern ext4_group_t ext4_get_group_number(struct super_block *sb,
|
|
|
|
ext4_fsblk_t block);
|
|
|
|
|
2008-01-29 04:58:27 +00:00
|
|
|
extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group);
|
|
|
|
extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
|
|
|
|
ext4_group_t group);
|
2008-07-11 23:27:31 +00:00
|
|
|
extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
|
2011-05-25 11:41:26 +00:00
|
|
|
ext4_fsblk_t goal,
|
|
|
|
unsigned int flags,
|
|
|
|
unsigned long *count,
|
|
|
|
int *errp);
|
2011-09-09 23:14:51 +00:00
|
|
|
extern int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
|
|
|
|
s64 nclusters, unsigned int flags);
|
2011-09-09 23:10:51 +00:00
|
|
|
extern ext4_fsblk_t ext4_count_free_clusters(struct super_block *);
|
2006-10-11 08:20:53 +00:00
|
|
|
extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
|
2008-01-29 04:58:27 +00:00
|
|
|
ext4_group_t block_group,
|
2006-10-11 08:20:50 +00:00
|
|
|
struct buffer_head ** bh);
|
ext4: allow ext4_get_group_info() to fail
Previously, ext4_get_group_info() would treat an invalid group number
as BUG(), since in theory it should never happen. However, if a
malicious attacker (or fuzzer) modifies the superblock via the block
device while the file system is mounted, it is possible for
s_first_data_block to get set to a very large number. In that case,
when calculating the block group of some block number (such as the
starting block of a preallocation region), could result in an
underflow and very large block group number. Then the BUG_ON check in
ext4_get_group_info() would fire, resulting in a denial of service
attack that can be triggered by root or someone with write access to
the block device.
For a quality of implementation perspective, it's best that even if
the system administrator does something that they shouldn't, that it
will not trigger a BUG. So instead of BUG'ing, ext4_get_group_info()
will call ext4_error and return NULL. We also add fallback code in
all of the callers of ext4_get_group_info() in case it returns NULL.
Also, since ext4_get_group_info() was already borderline to be an
inline function, un-inline it. The results in a next reduction of the
compiled text size of ext4 by roughly 2k.
Cc: stable@kernel.org
Link: https://lore.kernel.org/r/20230430154311.579720-2-tytso@mit.edu
Reported-by: syzbot+e2efa3efc15a1c9e95c3@syzkaller.appspotmail.com
Link: https://syzkaller.appspot.com/bug?id=69b28112e098b070f639efb356393af3ffec4220
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Jan Kara <jack@suse.cz>
2023-04-29 04:06:28 +00:00
|
|
|
extern struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
|
|
|
|
ext4_group_t group);
|
2006-10-11 08:20:53 +00:00
|
|
|
extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
|
2012-02-20 22:52:46 +00:00
|
|
|
|
|
|
|
extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
|
2020-04-21 07:54:07 +00:00
|
|
|
ext4_group_t block_group,
|
|
|
|
bool ignore_locked);
|
2012-02-20 22:52:46 +00:00
|
|
|
extern int ext4_wait_block_bitmap(struct super_block *sb,
|
|
|
|
ext4_group_t block_group,
|
|
|
|
struct buffer_head *bh);
|
|
|
|
extern struct buffer_head *ext4_read_block_bitmap(struct super_block *sb,
|
|
|
|
ext4_group_t block_group);
|
2011-09-09 23:12:51 +00:00
|
|
|
extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
|
|
|
|
ext4_group_t block_group,
|
|
|
|
struct ext4_group_desc *gdp);
|
2011-06-28 14:01:31 +00:00
|
|
|
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2022-01-18 06:56:14 +00:00
|
|
|
#if IS_ENABLED(CONFIG_UNICODE)
|
2021-03-19 07:34:14 +00:00
|
|
|
extern int ext4_fname_setup_ci_filename(struct inode *dir,
|
2019-06-20 03:45:09 +00:00
|
|
|
const struct qstr *iname,
|
2021-03-19 07:34:14 +00:00
|
|
|
struct ext4_filename *fname);
|
2019-06-20 03:45:09 +00:00
|
|
|
#endif
|
|
|
|
|
2022-05-15 06:37:46 +00:00
|
|
|
/* ext4 encryption related stuff goes here crypto.c */
|
2018-12-12 09:50:12 +00:00
|
|
|
#ifdef CONFIG_FS_ENCRYPTION
|
2022-05-15 06:37:46 +00:00
|
|
|
extern const struct fscrypt_operations ext4_cryptops;
|
|
|
|
|
2022-05-15 06:37:47 +00:00
|
|
|
int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
|
|
|
|
int lookup, struct ext4_filename *fname);
|
2015-04-12 04:56:17 +00:00
|
|
|
|
2022-05-15 06:37:47 +00:00
|
|
|
int ext4_fname_prepare_lookup(struct inode *dir, struct dentry *dentry,
|
|
|
|
struct ext4_filename *fname);
|
ext4 crypto: reorganize how we store keys in the inode
This is a pretty massive patch which does a number of different things:
1) The per-inode encryption information is now stored in an allocated
data structure, ext4_crypt_info, instead of directly in the node.
This reduces the size usage of an in-memory inode when it is not
using encryption.
2) We drop the ext4_fname_crypto_ctx entirely, and use the per-inode
encryption structure instead. This remove an unnecessary memory
allocation and free for the fname_crypto_ctx as well as allowing us
to reuse the ctfm in a directory for multiple lookups and file
creations.
3) We also cache the inode's policy information in the ext4_crypt_info
structure so we don't have to continually read it out of the
extended attributes.
4) We now keep the keyring key in the inode's encryption structure
instead of releasing it after we are done using it to derive the
per-inode key. This allows us to test to see if the key has been
revoked; if it has, we prevent the use of the derived key and free
it.
5) When an inode is released (or when the derived key is freed), we
will use memset_explicit() to zero out the derived key, so it's not
left hanging around in memory. This implies that when a user logs
out, it is important to first revoke the key, and then unlink it,
and then finally, to use "echo 3 > /proc/sys/vm/drop_caches" to
release any decrypted pages and dcache entries from the system
caches.
6) All this, and we also shrink the number of lines of code by around
100. :-)
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2015-05-18 17:17:47 +00:00
|
|
|
|
2022-05-15 06:37:47 +00:00
|
|
|
void ext4_fname_free_filename(struct ext4_filename *fname);
|
2019-06-20 03:45:09 +00:00
|
|
|
|
2022-05-15 06:37:48 +00:00
|
|
|
int ext4_ioctl_get_encryption_pwsalt(struct file *filp, void __user *arg);
|
|
|
|
|
fscrypt: fix race where ->lookup() marks plaintext dentry as ciphertext
->lookup() in an encrypted directory begins as follows:
1. fscrypt_prepare_lookup():
a. Try to load the directory's encryption key.
b. If the key is unavailable, mark the dentry as a ciphertext name
via d_flags.
2. fscrypt_setup_filename():
a. Try to load the directory's encryption key.
b. If the key is available, encrypt the name (treated as a plaintext
name) to get the on-disk name. Otherwise decode the name
(treated as a ciphertext name) to get the on-disk name.
But if the key is concurrently added, it may be found at (2a) but not at
(1a). In this case, the dentry will be wrongly marked as a ciphertext
name even though it was actually treated as plaintext.
This will cause the dentry to be wrongly invalidated on the next lookup,
potentially causing problems. For example, if the racy ->lookup() was
part of sys_mount(), then the new mount will be detached when anything
tries to access it. This is despite the mountpoint having a plaintext
path, which should remain valid now that the key was added.
Of course, this is only possible if there's a userspace race. Still,
the additional kernel-side race is confusing and unexpected.
Close the kernel-side race by changing fscrypt_prepare_lookup() to also
set the on-disk filename (step 2b), consistent with the d_flags update.
Fixes: 28b4c263961c ("ext4 crypto: revalidate dentry after adding or removing the key")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2019-03-20 18:39:13 +00:00
|
|
|
#else /* !CONFIG_FS_ENCRYPTION */
|
2016-07-10 18:01:03 +00:00
|
|
|
static inline int ext4_fname_setup_filename(struct inode *dir,
|
fscrypt: fix race where ->lookup() marks plaintext dentry as ciphertext
->lookup() in an encrypted directory begins as follows:
1. fscrypt_prepare_lookup():
a. Try to load the directory's encryption key.
b. If the key is unavailable, mark the dentry as a ciphertext name
via d_flags.
2. fscrypt_setup_filename():
a. Try to load the directory's encryption key.
b. If the key is available, encrypt the name (treated as a plaintext
name) to get the on-disk name. Otherwise decode the name
(treated as a ciphertext name) to get the on-disk name.
But if the key is concurrently added, it may be found at (2a) but not at
(1a). In this case, the dentry will be wrongly marked as a ciphertext
name even though it was actually treated as plaintext.
This will cause the dentry to be wrongly invalidated on the next lookup,
potentially causing problems. For example, if the racy ->lookup() was
part of sys_mount(), then the new mount will be detached when anything
tries to access it. This is despite the mountpoint having a plaintext
path, which should remain valid now that the key was added.
Of course, this is only possible if there's a userspace race. Still,
the additional kernel-side race is confusing and unexpected.
Close the kernel-side race by changing fscrypt_prepare_lookup() to also
set the on-disk filename (step 2b), consistent with the d_flags update.
Fixes: 28b4c263961c ("ext4 crypto: revalidate dentry after adding or removing the key")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2019-03-20 18:39:13 +00:00
|
|
|
const struct qstr *iname,
|
|
|
|
int lookup,
|
|
|
|
struct ext4_filename *fname)
|
ext4 crypto: reorganize how we store keys in the inode
This is a pretty massive patch which does a number of different things:
1) The per-inode encryption information is now stored in an allocated
data structure, ext4_crypt_info, instead of directly in the node.
This reduces the size usage of an in-memory inode when it is not
using encryption.
2) We drop the ext4_fname_crypto_ctx entirely, and use the per-inode
encryption structure instead. This remove an unnecessary memory
allocation and free for the fname_crypto_ctx as well as allowing us
to reuse the ctfm in a directory for multiple lookups and file
creations.
3) We also cache the inode's policy information in the ext4_crypt_info
structure so we don't have to continually read it out of the
extended attributes.
4) We now keep the keyring key in the inode's encryption structure
instead of releasing it after we are done using it to derive the
per-inode key. This allows us to test to see if the key has been
revoked; if it has, we prevent the use of the derived key and free
it.
5) When an inode is released (or when the derived key is freed), we
will use memset_explicit() to zero out the derived key, so it's not
left hanging around in memory. This implies that when a user logs
out, it is important to first revoke the key, and then unlink it,
and then finally, to use "echo 3 > /proc/sys/vm/drop_caches" to
release any decrypted pages and dcache entries from the system
caches.
6) All this, and we also shrink the number of lines of code by around
100. :-)
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2015-05-18 17:17:47 +00:00
|
|
|
{
|
2021-03-19 07:34:14 +00:00
|
|
|
int err = 0;
|
2016-07-10 18:01:03 +00:00
|
|
|
fname->usr_fname = iname;
|
|
|
|
fname->disk_name.name = (unsigned char *) iname->name;
|
|
|
|
fname->disk_name.len = iname->len;
|
2019-06-20 03:45:09 +00:00
|
|
|
|
2022-01-18 06:56:14 +00:00
|
|
|
#if IS_ENABLED(CONFIG_UNICODE)
|
2021-03-19 07:34:14 +00:00
|
|
|
err = ext4_fname_setup_ci_filename(dir, iname, fname);
|
2019-06-20 03:45:09 +00:00
|
|
|
#endif
|
|
|
|
|
2021-03-19 07:34:14 +00:00
|
|
|
return err;
|
ext4 crypto: reorganize how we store keys in the inode
This is a pretty massive patch which does a number of different things:
1) The per-inode encryption information is now stored in an allocated
data structure, ext4_crypt_info, instead of directly in the node.
This reduces the size usage of an in-memory inode when it is not
using encryption.
2) We drop the ext4_fname_crypto_ctx entirely, and use the per-inode
encryption structure instead. This remove an unnecessary memory
allocation and free for the fname_crypto_ctx as well as allowing us
to reuse the ctfm in a directory for multiple lookups and file
creations.
3) We also cache the inode's policy information in the ext4_crypt_info
structure so we don't have to continually read it out of the
extended attributes.
4) We now keep the keyring key in the inode's encryption structure
instead of releasing it after we are done using it to derive the
per-inode key. This allows us to test to see if the key has been
revoked; if it has, we prevent the use of the derived key and free
it.
5) When an inode is released (or when the derived key is freed), we
will use memset_explicit() to zero out the derived key, so it's not
left hanging around in memory. This implies that when a user logs
out, it is important to first revoke the key, and then unlink it,
and then finally, to use "echo 3 > /proc/sys/vm/drop_caches" to
release any decrypted pages and dcache entries from the system
caches.
6) All this, and we also shrink the number of lines of code by around
100. :-)
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2015-05-18 17:17:47 +00:00
|
|
|
}
|
2015-04-12 04:55:06 +00:00
|
|
|
|
fscrypt: fix race where ->lookup() marks plaintext dentry as ciphertext
->lookup() in an encrypted directory begins as follows:
1. fscrypt_prepare_lookup():
a. Try to load the directory's encryption key.
b. If the key is unavailable, mark the dentry as a ciphertext name
via d_flags.
2. fscrypt_setup_filename():
a. Try to load the directory's encryption key.
b. If the key is available, encrypt the name (treated as a plaintext
name) to get the on-disk name. Otherwise decode the name
(treated as a ciphertext name) to get the on-disk name.
But if the key is concurrently added, it may be found at (2a) but not at
(1a). In this case, the dentry will be wrongly marked as a ciphertext
name even though it was actually treated as plaintext.
This will cause the dentry to be wrongly invalidated on the next lookup,
potentially causing problems. For example, if the racy ->lookup() was
part of sys_mount(), then the new mount will be detached when anything
tries to access it. This is despite the mountpoint having a plaintext
path, which should remain valid now that the key was added.
Of course, this is only possible if there's a userspace race. Still,
the additional kernel-side race is confusing and unexpected.
Close the kernel-side race by changing fscrypt_prepare_lookup() to also
set the on-disk filename (step 2b), consistent with the d_flags update.
Fixes: 28b4c263961c ("ext4 crypto: revalidate dentry after adding or removing the key")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2019-03-20 18:39:13 +00:00
|
|
|
static inline int ext4_fname_prepare_lookup(struct inode *dir,
|
|
|
|
struct dentry *dentry,
|
|
|
|
struct ext4_filename *fname)
|
|
|
|
{
|
|
|
|
return ext4_fname_setup_filename(dir, &dentry->d_name, 1, fname);
|
|
|
|
}
|
|
|
|
|
2019-06-20 03:45:09 +00:00
|
|
|
/*
 * !CONFIG_FS_ENCRYPTION variant of ext4_fname_free_filename().
 *
 * Even without encryption, a CONFIG_UNICODE (casefold) build may have
 * allocated a case-folded copy of the name; release it here.
 */
static inline void ext4_fname_free_filename(struct ext4_filename *fname)
{
#if IS_ENABLED(CONFIG_UNICODE)
	kfree(fname->cf_name.name);
	fname->cf_name.name = NULL;	/* guard against double free on reuse */
#endif
}
|
2022-05-15 06:37:48 +00:00
|
|
|
|
|
|
|
/*
 * !CONFIG_FS_ENCRYPTION stub: the get-pwsalt ioctl helper is
 * unsupported when the kernel is built without fscrypt support.
 */
static inline int ext4_ioctl_get_encryption_pwsalt(struct file *filp,
						   void __user *arg)
{
	return -EOPNOTSUPP;
}
|
fscrypt: fix race where ->lookup() marks plaintext dentry as ciphertext
->lookup() in an encrypted directory begins as follows:
1. fscrypt_prepare_lookup():
a. Try to load the directory's encryption key.
b. If the key is unavailable, mark the dentry as a ciphertext name
via d_flags.
2. fscrypt_setup_filename():
a. Try to load the directory's encryption key.
b. If the key is available, encrypt the name (treated as a plaintext
name) to get the on-disk name. Otherwise decode the name
(treated as a ciphertext name) to get the on-disk name.
But if the key is concurrently added, it may be found at (2a) but not at
(1a). In this case, the dentry will be wrongly marked as a ciphertext
name even though it was actually treated as plaintext.
This will cause the dentry to be wrongly invalidated on the next lookup,
potentially causing problems. For example, if the racy ->lookup() was
part of sys_mount(), then the new mount will be detached when anything
tries to access it. This is despite the mountpoint having a plaintext
path, which should remain valid now that the key was added.
Of course, this is only possible if there's a userspace race. Still,
the additional kernel-side race is confusing and unexpected.
Close the kernel-side race by changing fscrypt_prepare_lookup() to also
set the on-disk filename (step 2b), consistent with the d_flags update.
Fixes: 28b4c263961c ("ext4 crypto: revalidate dentry after adding or removing the key")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2019-03-20 18:39:13 +00:00
|
|
|
#endif /* !CONFIG_FS_ENCRYPTION */
|
2015-04-12 04:55:06 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/* dir.c */
|
2010-07-27 15:54:40 +00:00
|
|
|
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
|
2011-01-10 17:10:55 +00:00
|
|
|
struct file *,
|
2010-07-27 15:54:40 +00:00
|
|
|
struct ext4_dir_entry_2 *,
|
2012-12-10 19:05:58 +00:00
|
|
|
struct buffer_head *, char *, int,
|
|
|
|
unsigned int);
|
2021-03-19 07:34:13 +00:00
|
|
|
/*
 * Validate a single directory entry.  Thin wrapper around
 * __ext4_check_dir_entry() that supplies the calling function's name
 * and line number for error reporting; the result is wrapped in
 * unlikely() since corruption is the exceptional case.
 */
#define ext4_check_dir_entry(dir, filp, de, bh, buf, size, offset) \
	unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
					(de), (bh), (buf), (size), (offset)))
|
2006-10-11 08:20:53 +00:00
|
|
|
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
|
2015-04-12 04:56:26 +00:00
|
|
|
__u32 minor_hash,
|
|
|
|
struct ext4_dir_entry_2 *dirent,
|
2016-07-10 18:01:03 +00:00
|
|
|
struct fscrypt_str *ent_name);
|
2006-10-11 08:20:53 +00:00
|
|
|
extern void ext4_htree_free_dir_info(struct dir_private_info *p);
|
2012-12-10 19:05:58 +00:00
|
|
|
extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
|
|
|
|
struct buffer_head *bh,
|
|
|
|
void *buf, int buf_size,
|
2015-05-18 17:14:47 +00:00
|
|
|
struct ext4_filename *fname,
|
2012-12-10 19:05:58 +00:00
|
|
|
struct ext4_dir_entry_2 **dest_de);
|
2021-03-19 07:34:13 +00:00
|
|
|
void ext4_insert_dentry(struct inode *dir, struct inode *inode,
|
2017-04-30 03:27:26 +00:00
|
|
|
struct ext4_dir_entry_2 *de,
|
|
|
|
int buf_size,
|
|
|
|
struct ext4_filename *fname);
|
2012-12-10 19:05:58 +00:00
|
|
|
static inline void ext4_update_dx_flag(struct inode *inode)
|
|
|
|
{
|
2020-11-18 15:30:32 +00:00
|
|
|
if (!ext4_has_feature_dir_index(inode->i_sb) &&
|
|
|
|
ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
|
2020-02-10 14:43:16 +00:00
|
|
|
/* ext4_iget() should have caught this... */
|
|
|
|
WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
|
2012-12-10 19:05:58 +00:00
|
|
|
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
|
2020-02-10 14:43:16 +00:00
|
|
|
}
|
2012-12-10 19:05:58 +00:00
|
|
|
}
|
2017-04-30 03:47:50 +00:00
|
|
|
/*
 * Maps an on-disk directory-entry file type code (index) to the DT_*
 * constant reported to userspace.  Indices beyond EXT4_FT_MAX are
 * rejected by get_dtype() before this table is consulted.
 */
static const unsigned char ext4_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
|
|
|
|
|
|
|
|
static inline unsigned char get_dtype(struct super_block *sb, int filetype)
|
|
|
|
{
|
2015-10-17 20:18:43 +00:00
|
|
|
if (!ext4_has_feature_filetype(sb) || filetype >= EXT4_FT_MAX)
|
2012-12-10 19:05:59 +00:00
|
|
|
return DT_UNKNOWN;
|
|
|
|
|
|
|
|
return ext4_filetype_table[filetype];
|
|
|
|
}
|
2014-07-28 17:06:26 +00:00
|
|
|
extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
|
|
|
|
void *buf, int buf_size);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/* fsync.c */
|
2011-07-17 00:44:56 +00:00
|
|
|
extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/* hash.c */
|
ext4: Support case-insensitive file name lookups
This patch implements the actual support for case-insensitive file name
lookups in ext4, based on the feature bit and the encoding stored in the
superblock.
A filesystem that has the casefold feature set is able to configure
directories with the +F (EXT4_CASEFOLD_FL) attribute, enabling lookups
to succeed in that directory in a case-insensitive fashion, i.e: match
a directory entry even if the name used by userspace is not a byte per
byte match with the disk name, but is an equivalent case-insensitive
version of the Unicode string. This operation is called a
case-insensitive file name lookup.
The feature is configured as an inode attribute applied to directories
and inherited by its children. This attribute can only be enabled on
empty directories for filesystems that support the encoding feature,
thus preventing collision of file names that only differ by case.
* dcache handling:
For a +F directory, Ext4 only stores the first equivalent name dentry
used in the dcache. This is done to prevent unintentional duplication of
dentries in the dcache, while also allowing the VFS code to quickly find
the right entry in the cache despite which equivalent string was used in
a previous lookup, without having to resort to ->lookup().
d_hash() of casefolded directories is implemented as the hash of the
casefolded string, such that we always have a well-known bucket for all
the equivalencies of the same string. d_compare() uses the
utf8_strncasecmp() infrastructure, which handles the comparison of
equivalent, same case, names as well.
For now, negative lookups are not inserted in the dcache, since they
would need to be invalidated anyway, because we can't trust missing file
dentries. This is bad for performance but requires some leveraging of
the vfs layer to fix. We can live without that for now, and so does
everyone else.
* on-disk data:
Despite using a specific version of the name as the internal
representation within the dcache, the name stored and fetched from the
disk is a byte-per-byte match with what the user requested, making this
implementation 'name-preserving'. i.e. no actual information is lost
when writing to storage.
DX is supported by modifying the hashes used in +F directories to make
them case/encoding-aware. The new disk hashes are calculated as the
hash of the full casefolded string, instead of the string directly.
This allows us to efficiently search for file names in the htree without
requiring the user to provide an exact name.
* Dealing with invalid sequences:
By default, when a invalid UTF-8 sequence is identified, ext4 will treat
it as an opaque byte sequence, ignoring the encoding and reverting to
the old behavior for that unique file. This means that case-insensitive
file name lookup will not work only for that file. An optional bit can
be set in the superblock telling the filesystem code and userspace tools
to enforce the encoding. When that optional bit is set, any attempt to
create a file name using an invalid UTF-8 sequence will fail and return
an error to userspace.
* Normalization algorithm:
The UTF-8 algorithms used to compare strings in ext4 is implemented
lives in fs/unicode, and is based on a previous version developed by
SGI. It implements the Canonical decomposition (NFD) algorithm
described by the Unicode specification 12.1, or higher, combined with
the elimination of ignorable code points (NFDi) and full
case-folding (CF) as documented in fs/unicode/utf8_norm.c.
NFD seems to be the best normalization method for EXT4 because:
- It has a lower cost than NFC/NFKC (which requires
decomposing to NFD as an intermediary step)
- It doesn't eliminate important semantic meaning like
compatibility decompositions.
Although:
- This implementation is not completely linguistic accurate, because
different languages have conflicting rules, which would require the
specialization of the filesystem to a given locale, which brings all
sorts of problems for removable media and for users who use more than
one language.
Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.co.uk>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2019-04-25 18:12:08 +00:00
|
|
|
extern int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
|
|
|
|
struct dx_hash_info *hinfo);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/* ialloc.c */
|
2020-10-15 20:37:59 +00:00
|
|
|
extern int ext4_mark_inode_used(struct super_block *sb, int ino);
|
2023-01-13 11:49:25 +00:00
|
|
|
extern struct inode *__ext4_new_inode(struct mnt_idmap *, handle_t *,
|
2021-01-21 13:19:57 +00:00
|
|
|
struct inode *, umode_t,
|
2013-02-09 21:27:09 +00:00
|
|
|
const struct qstr *qstr, __u32 goal,
|
2017-06-22 01:21:39 +00:00
|
|
|
uid_t *owner, __u32 i_flags,
|
|
|
|
int handle_type, unsigned int line_no,
|
|
|
|
int nblocks);
|
2013-02-09 21:27:09 +00:00
|
|
|
|
2021-01-21 13:19:57 +00:00
|
|
|
#define ext4_new_inode(handle, dir, mode, qstr, goal, owner, i_flags) \
|
2023-01-13 11:49:25 +00:00
|
|
|
__ext4_new_inode(&nop_mnt_idmap, (handle), (dir), (mode), (qstr), \
|
2021-01-21 13:19:57 +00:00
|
|
|
(goal), (owner), i_flags, 0, 0, 0)
|
2023-01-13 11:49:25 +00:00
|
|
|
#define ext4_new_inode_start_handle(idmap, dir, mode, qstr, goal, owner, \
|
2013-02-09 21:27:09 +00:00
|
|
|
type, nblocks) \
|
2023-01-13 11:49:25 +00:00
|
|
|
__ext4_new_inode((idmap), NULL, (dir), (mode), (qstr), (goal), (owner), \
|
2017-06-22 01:21:39 +00:00
|
|
|
0, (type), __LINE__, (nblocks))
|
2013-02-09 21:27:09 +00:00
|
|
|
|
|
|
|
|
2008-09-09 02:25:24 +00:00
|
|
|
extern void ext4_free_inode(handle_t *, struct inode *);
|
|
|
|
extern struct inode * ext4_orphan_get(struct super_block *, unsigned long);
|
|
|
|
extern unsigned long ext4_count_free_inodes(struct super_block *);
|
|
|
|
extern unsigned long ext4_count_dirs(struct super_block *);
|
2010-10-28 01:30:15 +00:00
|
|
|
extern void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap);
|
ext4: add support for lazy inode table initialization
When the lazy_itable_init extended option is passed to mke2fs, it
considerably speeds up filesystem creation because inode tables are
not zeroed out. The fact that parts of the inode table are
uninitialized is not a problem so long as the block group descriptors,
which contain information regarding how much of the inode table has
been initialized, has not been corrupted However, if the block group
checksums are not valid, e2fsck must scan the entire inode table, and
the the old, uninitialized data could potentially cause e2fsck to
report false problems.
Hence, it is important for the inode tables to be initialized as soon
as possible. This commit adds this feature so that mke2fs can safely
use the lazy inode table initialization feature to speed up formatting
file systems.
This is done via a new new kernel thread called ext4lazyinit, which is
created on demand and destroyed, when it is no longer needed. There
is only one thread for all ext4 filesystems in the system. When the
first filesystem with inititable mount option is mounted, ext4lazyinit
thread is created, then the filesystem can register its request in the
request list.
This thread then walks through the list of requests picking up
scheduled requests and invoking ext4_init_inode_table(). Next schedule
time for the request is computed by multiplying the time it took to
zero out last inode table with wait multiplier, which can be set with
the (init_itable=n) mount option (default is 10). We are doing
this so we do not take the whole I/O bandwidth. When the thread is no
longer necessary (request list is empty) it frees the appropriate
structures and exits (and can be created later later by another
filesystem).
We do not disturb regular inode allocations in any way, it just do not
care whether the inode table is, or is not zeroed. But when zeroing, we
have to skip used inodes, obviously. Also we should prevent new inode
allocations from the group, while zeroing is on the way. For that we
take write alloc_sem lock in ext4_init_inode_table() and read alloc_sem
in the ext4_claim_inode, so when we are unlucky and allocator hits the
group which is currently being zeroed, it just has to wait.
This can be suppressed using the mount option no_init_itable.
Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2010-10-28 01:30:05 +00:00
|
|
|
extern int ext4_init_inode_table(struct super_block *sb,
|
|
|
|
ext4_group_t group, int barrier);
|
2012-02-20 22:52:46 +00:00
|
|
|
extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2020-10-15 20:37:55 +00:00
|
|
|
/* fast_commit.c */
|
2020-10-15 20:38:01 +00:00
|
|
|
int ext4_fc_info_show(struct seq_file *seq, void *v);
|
2020-10-15 20:37:55 +00:00
|
|
|
void ext4_fc_init(struct super_block *sb, journal_t *journal);
|
2020-10-15 20:37:57 +00:00
|
|
|
void ext4_fc_init_inode(struct inode *inode);
|
2020-11-06 03:58:53 +00:00
|
|
|
void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t start,
|
2020-10-15 20:37:57 +00:00
|
|
|
ext4_lblk_t end);
|
2020-11-06 03:58:53 +00:00
|
|
|
void __ext4_fc_track_unlink(handle_t *handle, struct inode *inode,
|
|
|
|
struct dentry *dentry);
|
|
|
|
void __ext4_fc_track_link(handle_t *handle, struct inode *inode,
|
|
|
|
struct dentry *dentry);
|
|
|
|
void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry);
|
|
|
|
void ext4_fc_track_link(handle_t *handle, struct dentry *dentry);
|
ext4: fix rename whiteout with fast commit
This patch adds rename whiteout support in fast commits. Note that the
whiteout object that gets created is actually a char device. This
implies that the function ext4_inode_journal_mode(struct inode *inode)
would return "JOURNAL_DATA" for this inode. This has a consequence in
fast commit code that it will make creation of the whiteout object a
fast-commit ineligible behavior and thus will fall back to full
commits. With this patch, this can be observed by running fast commits
with rename whiteout and seeing the stats generated by ext4_fc_stats
tracepoint as follows:
ext4_fc_stats: dev 254:32 fc ineligible reasons:
XATTR:0, CROSS_RENAME:0, JOURNAL_FLAG_CHANGE:0, NO_MEM:0, SWAP_BOOT:0,
RESIZE:0, RENAME_DIR:0, FALLOC_RANGE:0, INODE_JOURNAL_DATA:16;
num_commits:6, ineligible: 6, numblks: 3
So in short, this patch guarantees that in case of rename whiteout, we
fall back to full commits.
Amir mentioned that instead of creating a new whiteout object for
every rename, we can create a static whiteout object with irrelevant
nlink. That will make fast commits to not fall back to full
commit. But until this happens, this patch will ensure correctness by
falling back to full commits.
Fixes: 8016e29f4362 ("ext4: fast commit recovery path")
Cc: stable@kernel.org
Signed-off-by: Harshad Shirwadkar <harshadshirwadkar@gmail.com>
Link: https://lore.kernel.org/r/20210316221921.1124955-1-harshadshirwadkar@gmail.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2021-03-16 22:19:21 +00:00
|
|
|
void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
|
|
|
|
struct dentry *dentry);
|
2020-11-06 03:58:53 +00:00
|
|
|
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
|
|
|
|
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
|
2022-01-17 09:36:54 +00:00
|
|
|
void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle);
|
2020-10-15 20:37:57 +00:00
|
|
|
void ext4_fc_start_update(struct inode *inode);
|
|
|
|
void ext4_fc_stop_update(struct inode *inode);
|
|
|
|
void ext4_fc_del(struct inode *inode);
|
2020-10-15 20:37:59 +00:00
|
|
|
bool ext4_fc_replay_check_excluded(struct super_block *sb, ext4_fsblk_t block);
|
|
|
|
void ext4_fc_replay_cleanup(struct super_block *sb);
|
2020-10-15 20:37:57 +00:00
|
|
|
int ext4_fc_commit(journal_t *journal, tid_t commit_tid);
|
|
|
|
int __init ext4_fc_init_dentry_cache(void);
|
2021-12-23 16:44:36 +00:00
|
|
|
void ext4_fc_destroy_dentry_cache(void);
|
2022-01-10 03:51:40 +00:00
|
|
|
int ext4_fc_record_regions(struct super_block *sb, int ino,
|
|
|
|
ext4_lblk_t lblk, ext4_fsblk_t pblk,
|
|
|
|
int len, int replay);
|
2020-10-15 20:37:57 +00:00
|
|
|
|
2008-01-29 05:19:52 +00:00
|
|
|
/* mballoc.c */
|
2018-04-11 09:37:23 +00:00
|
|
|
extern const struct seq_operations ext4_mb_seq_groups_ops;
|
2021-04-01 17:21:28 +00:00
|
|
|
extern const struct seq_operations ext4_mb_seq_structs_summary_ops;
|
2021-04-01 17:21:25 +00:00
|
|
|
extern int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset);
|
2012-05-28 18:19:25 +00:00
|
|
|
extern int ext4_mb_init(struct super_block *);
|
2008-01-29 05:19:52 +00:00
|
|
|
extern int ext4_mb_release(struct super_block *);
|
|
|
|
extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
|
|
|
|
struct ext4_allocation_request *, int *);
|
2020-08-17 07:36:15 +00:00
|
|
|
extern void ext4_discard_preallocations(struct inode *, unsigned int);
|
2010-10-28 01:30:14 +00:00
|
|
|
extern int __init ext4_init_mballoc(void);
|
|
|
|
extern void ext4_exit_mballoc(void);
|
2020-07-17 04:14:40 +00:00
|
|
|
extern ext4_group_t ext4_mb_prefetch(struct super_block *sb,
|
|
|
|
ext4_group_t group,
|
|
|
|
unsigned int nr, int *cnt);
|
|
|
|
extern void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
|
|
|
|
unsigned int nr);
|
|
|
|
|
2009-11-22 12:44:56 +00:00
|
|
|
extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
|
2009-11-23 12:17:05 +00:00
|
|
|
struct buffer_head *bh, ext4_fsblk_t block,
|
|
|
|
unsigned long count, int flags);
|
2012-09-05 05:31:50 +00:00
|
|
|
extern int ext4_mb_alloc_groupinfo(struct super_block *sb,
|
|
|
|
ext4_group_t ngroups);
|
2009-01-06 02:36:19 +00:00
|
|
|
extern int ext4_mb_add_groupinfo(struct super_block *sb,
|
2008-07-11 23:27:31 +00:00
|
|
|
ext4_group_t i, struct ext4_group_desc *desc);
|
2011-07-27 01:46:07 +00:00
|
|
|
extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
|
2011-05-09 14:46:41 +00:00
|
|
|
ext4_fsblk_t block, unsigned long count);
|
2010-10-28 01:30:12 +00:00
|
|
|
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
|
2017-06-23 03:54:33 +00:00
|
|
|
extern void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid);
|
2020-10-15 20:37:59 +00:00
|
|
|
extern void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
|
2023-09-28 16:03:56 +00:00
|
|
|
int len, bool state);
|
2023-06-30 08:59:27 +00:00
|
|
|
static inline bool ext4_mb_cr_expensive(enum criteria cr)
|
|
|
|
{
|
|
|
|
return cr >= CR_GOAL_LEN_SLOW;
|
|
|
|
}
|
2010-10-28 01:30:12 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/* inode.c */
|
2020-10-15 20:37:59 +00:00
|
|
|
void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
|
|
|
|
struct ext4_inode_info *ei);
|
2015-04-16 05:55:00 +00:00
|
|
|
int ext4_inode_is_fast_symlink(struct inode *inode);
|
2014-08-30 00:51:32 +00:00
|
|
|
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
|
2014-08-30 00:52:15 +00:00
|
|
|
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
|
2017-08-06 04:07:01 +00:00
|
|
|
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
|
|
|
|
bool wait, struct buffer_head **bhs);
|
2016-03-09 04:08:10 +00:00
|
|
|
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
|
|
|
|
struct buffer_head *bh_result, int create);
|
2008-10-07 04:46:36 +00:00
|
|
|
int ext4_get_block(struct inode *inode, sector_t iblock,
|
2016-03-09 04:08:10 +00:00
|
|
|
struct buffer_head *bh_result, int create);
|
2012-12-10 19:05:57 +00:00
|
|
|
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
|
|
|
|
struct buffer_head *bh, int create);
|
2012-12-10 19:05:51 +00:00
|
|
|
int ext4_walk_page_buffers(handle_t *handle,
|
2021-08-16 09:57:04 +00:00
|
|
|
struct inode *inode,
|
2012-12-10 19:05:51 +00:00
|
|
|
struct buffer_head *head,
|
|
|
|
unsigned from,
|
|
|
|
unsigned to,
|
|
|
|
int *partial,
|
2021-08-16 09:57:04 +00:00
|
|
|
int (*fn)(handle_t *handle, struct inode *inode,
|
2012-12-10 19:05:51 +00:00
|
|
|
struct buffer_head *bh));
|
2021-08-16 09:57:04 +00:00
|
|
|
int do_journal_get_write_access(handle_t *handle, struct inode *inode,
|
2012-12-10 19:05:51 +00:00
|
|
|
struct buffer_head *bh);
|
2012-12-10 19:05:57 +00:00
|
|
|
#define FALL_BACK_TO_NONDELALLOC 1
|
|
|
|
#define CONVERT_INLINE_DATA 2
|
2006-10-11 08:20:50 +00:00
|
|
|
|
ext4: avoid declaring fs inconsistent due to invalid file handles
If we receive a file handle, either from NFS or open_by_handle_at(2),
and it points at an inode which has not been initialized, and the file
system has metadata checksums enabled, we shouldn't try to get the
inode, discover the checksum is invalid, and then declare the file
system as being inconsistent.
This can be reproduced by creating a test file system via "mke2fs -t
ext4 -O metadata_csum /tmp/foo.img 8M", mounting it, cd'ing into that
directory, and then running the following program.
#define _GNU_SOURCE
#include <fcntl.h>
struct handle {
struct file_handle fh;
unsigned char fid[MAX_HANDLE_SZ];
};
int main(int argc, char **argv)
{
struct handle h = {{8, 1 }, { 12, }};
open_by_handle_at(AT_FDCWD, &h.fh, O_RDONLY);
return 0;
}
Google-Bug-Id: 120690101
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@kernel.org
2018-12-19 17:29:13 +00:00
|
|
|
/*
 * Flags controlling __ext4_iget() behaviour.  Passed (possibly OR'ed
 * together) via the ext4_iget() wrapper macro.
 */
typedef enum {
	EXT4_IGET_NORMAL =	0,
	EXT4_IGET_SPECIAL =	0x0001, /* OK to iget a system inode */
	EXT4_IGET_HANDLE = 	0x0002,	/* Inode # is from a handle */
	EXT4_IGET_BAD =		0x0004, /* Allow to iget a bad inode */
	EXT4_IGET_EA_INODE =	0x0008	/* Inode should contain an EA value */
} ext4_iget_flags;
|
|
|
|
|
|
|
|
/*
 * Fetch and validate an inode; 'flags' (ext4_iget_flags) controls which
 * normally-invalid inodes may be returned.  'function'/'line' identify
 * the call site for error reporting -- use the ext4_iget() wrapper,
 * which fills them in automatically.
 */
extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
				 ext4_iget_flags flags, const char *function,
				 unsigned int line);

#define ext4_iget(sb, ino, flags) \
	__ext4_iget((sb), (ino), (flags), __func__, __LINE__)
|
|
|
|
|
2010-03-05 08:21:37 +00:00
|
|
|
extern int ext4_write_inode(struct inode *, struct writeback_control *);
|
2023-01-13 11:49:11 +00:00
|
|
|
extern int ext4_setattr(struct mnt_idmap *, struct dentry *,
|
2021-01-21 13:19:43 +00:00
|
|
|
struct iattr *);
|
2022-08-27 06:58:47 +00:00
|
|
|
extern u32 ext4_dio_alignment(struct inode *inode);
|
2023-01-13 11:49:12 +00:00
|
|
|
extern int ext4_getattr(struct mnt_idmap *, const struct path *,
|
2021-01-21 13:19:43 +00:00
|
|
|
struct kstat *, u32, unsigned int);
|
2010-06-07 17:16:22 +00:00
|
|
|
extern void ext4_evict_inode(struct inode *);
|
|
|
|
extern void ext4_clear_inode(struct inode *);
|
2023-01-13 11:49:12 +00:00
|
|
|
extern int ext4_file_getattr(struct mnt_idmap *, const struct path *,
|
2021-01-21 13:19:43 +00:00
|
|
|
struct kstat *, u32, unsigned int);
|
2011-05-27 10:53:02 +00:00
|
|
|
extern void ext4_dirty_inode(struct inode *, int);
|
2006-10-11 08:20:53 +00:00
|
|
|
extern int ext4_change_inode_journal_flag(struct inode *, int);
|
|
|
|
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
|
2020-10-15 20:37:59 +00:00
|
|
|
extern int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
|
|
|
|
struct ext4_iloc *iloc);
|
2013-08-17 01:19:41 +00:00
|
|
|
extern int ext4_inode_attach_jinode(struct inode *inode);
|
2008-07-11 23:27:31 +00:00
|
|
|
extern int ext4_can_truncate(struct inode *inode);
|
2016-11-14 03:02:26 +00:00
|
|
|
extern int ext4_truncate(struct inode *);
|
2018-07-29 21:00:22 +00:00
|
|
|
extern int ext4_break_layouts(struct inode *);
|
2022-03-08 18:50:43 +00:00
|
|
|
extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
|
2020-05-28 14:59:59 +00:00
|
|
|
extern void ext4_set_inode_flags(struct inode *, bool init);
|
2009-02-26 06:04:07 +00:00
|
|
|
extern int ext4_alloc_da_blocks(struct inode *inode);
|
2006-10-11 08:20:53 +00:00
|
|
|
extern void ext4_set_aops(struct inode *inode);
|
2006-10-11 08:21:03 +00:00
|
|
|
extern int ext4_writepage_trans_blocks(struct inode *);
|
2022-12-07 11:27:11 +00:00
|
|
|
extern int ext4_normal_submit_inode_data_buffers(struct jbd2_inode *jinode);
|
2008-08-20 02:16:03 +00:00
|
|
|
extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
|
2013-05-28 03:32:35 +00:00
|
|
|
extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
|
|
|
|
loff_t lstart, loff_t lend);
|
2018-10-03 02:20:50 +00:00
|
|
|
extern vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf);
|
2009-12-14 12:21:14 +00:00
|
|
|
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
|
2016-01-08 21:01:21 +00:00
|
|
|
extern int ext4_get_projid(struct inode *inode, kprojid_t *projid);
|
2018-10-01 18:33:24 +00:00
|
|
|
extern void ext4_da_release_space(struct inode *inode, int to_free);
|
2010-01-25 09:00:31 +00:00
|
|
|
extern void ext4_da_update_reserve_space(struct inode *inode,
|
|
|
|
int used, int quota_claim);
|
2015-12-07 20:09:35 +00:00
|
|
|
extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
|
|
|
|
ext4_fsblk_t pblk, ext4_lblk_t len);
|
2011-06-27 23:40:50 +00:00
|
|
|
|
|
|
|
/* indirect.c */
|
|
|
|
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
|
|
|
|
struct ext4_map_blocks *map, int flags);
|
2013-06-04 16:56:55 +00:00
|
|
|
extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
|
2013-04-03 16:47:17 +00:00
|
|
|
extern void ext4_ind_truncate(handle_t *, struct inode *inode);
|
2014-07-15 10:03:38 +00:00
|
|
|
extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
|
|
|
|
ext4_lblk_t start, ext4_lblk_t end);
|
2011-06-27 20:36:31 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/* ioctl.c */
|
2008-04-30 02:03:54 +00:00
|
|
|
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
|
2008-09-09 02:25:24 +00:00
|
|
|
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
|
2023-01-13 11:49:21 +00:00
|
|
|
int ext4_fileattr_set(struct mnt_idmap *idmap,
|
2021-04-07 12:36:43 +00:00
|
|
|
struct dentry *dentry, struct fileattr *fa);
|
|
|
|
int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
|
2020-10-15 20:37:59 +00:00
|
|
|
extern void ext4_reset_inode_seed(struct inode *inode);
|
2022-06-29 04:00:26 +00:00
|
|
|
int ext4_update_overhead(struct super_block *sb, bool force);
|
2023-06-01 09:44:57 +00:00
|
|
|
int ext4_force_shutdown(struct super_block *sb, u32 flags);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2008-01-29 04:58:26 +00:00
|
|
|
/* migrate.c */
|
2008-09-13 16:52:26 +00:00
|
|
|
extern int ext4_ext_migrate(struct inode *);
|
2013-04-11 03:32:52 +00:00
|
|
|
extern int ext4_ind_migrate(struct inode *inode);
|
2009-02-15 04:01:36 +00:00
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/* namei.c */
|
2020-10-15 20:37:59 +00:00
|
|
|
extern int ext4_init_new_dir(handle_t *handle, struct inode *dir,
|
|
|
|
struct inode *inode);
|
2019-06-21 19:49:26 +00:00
|
|
|
extern int ext4_dirblock_csum_verify(struct inode *inode,
|
|
|
|
struct buffer_head *bh);
|
2006-10-11 08:20:53 +00:00
|
|
|
extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
|
2006-10-11 08:20:50 +00:00
|
|
|
__u32 start_minor_hash, __u32 *next_hash);
|
2015-05-18 17:14:47 +00:00
|
|
|
extern int ext4_search_dir(struct buffer_head *bh,
|
|
|
|
char *search_buf,
|
|
|
|
int buf_size,
|
|
|
|
struct inode *dir,
|
|
|
|
struct ext4_filename *fname,
|
|
|
|
unsigned int offset,
|
|
|
|
struct ext4_dir_entry_2 **res_dir);
|
2020-08-10 08:07:05 +00:00
|
|
|
extern int ext4_generic_delete_entry(struct inode *dir,
|
2012-12-10 19:06:00 +00:00
|
|
|
struct ext4_dir_entry_2 *de_del,
|
|
|
|
struct buffer_head *bh,
|
|
|
|
void *entry_buf,
|
|
|
|
int buf_size,
|
|
|
|
int csum_size);
|
2016-07-10 18:01:03 +00:00
|
|
|
extern bool ext4_empty_dir(struct inode *inode);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/* resize.c */
|
2020-02-15 21:40:37 +00:00
|
|
|
extern void ext4_kvfree_array_rcu(void *to_free);
|
2006-10-11 08:20:53 +00:00
|
|
|
extern int ext4_group_add(struct super_block *sb,
|
|
|
|
struct ext4_new_group_data *input);
|
|
|
|
extern int ext4_group_extend(struct super_block *sb,
|
|
|
|
struct ext4_super_block *es,
|
|
|
|
ext4_fsblk_t n_blocks_count);
|
2012-01-04 22:09:44 +00:00
|
|
|
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
|
2021-12-13 13:56:18 +00:00
|
|
|
extern unsigned int ext4_list_backups(struct super_block *sb,
|
|
|
|
unsigned int *three, unsigned int *five,
|
|
|
|
unsigned int *seven);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/* super.c */
|
2018-11-25 22:20:31 +00:00
|
|
|
extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
|
2022-07-14 18:07:17 +00:00
|
|
|
sector_t block, blk_opf_t op_flags);
|
2020-09-24 07:33:37 +00:00
|
|
|
extern struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
|
|
|
|
sector_t block);
|
2022-07-14 18:07:17 +00:00
|
|
|
extern void ext4_read_bh_nowait(struct buffer_head *bh, blk_opf_t op_flags,
|
2020-09-24 07:33:32 +00:00
|
|
|
bh_end_io_t *end_io);
|
2022-07-14 18:07:17 +00:00
|
|
|
extern int ext4_read_bh(struct buffer_head *bh, blk_opf_t op_flags,
|
2020-09-24 07:33:32 +00:00
|
|
|
bh_end_io_t *end_io);
|
2022-07-14 18:07:17 +00:00
|
|
|
extern int ext4_read_bh_lock(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
|
2020-09-24 07:33:35 +00:00
|
|
|
extern void ext4_sb_breadahead_unmovable(struct super_block *sb, sector_t block);
|
2015-09-23 16:46:17 +00:00
|
|
|
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
|
2012-07-09 20:27:05 +00:00
|
|
|
extern int ext4_calculate_overhead(struct super_block *sb);
|
2021-12-13 13:56:18 +00:00
|
|
|
extern __le32 ext4_superblock_csum(struct super_block *sb,
|
|
|
|
struct ext4_super_block *es);
|
2012-10-10 05:06:58 +00:00
|
|
|
extern void ext4_superblock_csum_set(struct super_block *sb);
|
2012-09-05 05:29:50 +00:00
|
|
|
extern int ext4_alloc_flex_bg_array(struct super_block *sb,
|
|
|
|
ext4_group_t ngroup);
|
2013-02-08 18:00:31 +00:00
|
|
|
extern const char *ext4_decode_error(struct super_block *sb, int errno,
|
|
|
|
char nbuf[16]);
|
2018-05-12 15:39:40 +00:00
|
|
|
extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
|
|
|
|
ext4_group_t block_group,
|
|
|
|
unsigned int flags);
|
2023-08-02 16:28:39 +00:00
|
|
|
extern unsigned int ext4_num_base_meta_blocks(struct super_block *sb,
|
|
|
|
ext4_group_t block_group);
|
2013-07-01 12:12:37 +00:00
|
|
|
|
2020-11-27 11:33:57 +00:00
|
|
|
extern __printf(7, 8)
|
|
|
|
void __ext4_error(struct super_block *, const char *, unsigned int, bool,
|
|
|
|
int, __u64, const char *, ...);
|
2020-03-28 23:33:43 +00:00
|
|
|
extern __printf(6, 7)
|
|
|
|
void __ext4_error_inode(struct inode *, const char *, unsigned int,
|
|
|
|
ext4_fsblk_t, int, const char *, ...);
|
2011-11-01 00:11:33 +00:00
|
|
|
extern __printf(5, 6)
|
2013-07-01 12:12:37 +00:00
|
|
|
void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
|
2011-11-01 00:11:33 +00:00
|
|
|
const char *, ...);
|
2010-07-27 15:56:40 +00:00
|
|
|
extern void __ext4_std_error(struct super_block *, const char *,
|
|
|
|
unsigned int, int);
|
2011-11-01 00:11:33 +00:00
|
|
|
extern __printf(4, 5)
|
|
|
|
void __ext4_warning(struct super_block *, const char *, unsigned int,
|
|
|
|
const char *, ...);
|
2015-06-15 18:50:26 +00:00
|
|
|
extern __printf(4, 5)
|
|
|
|
void __ext4_warning_inode(const struct inode *inode, const char *function,
|
|
|
|
unsigned int line, const char *fmt, ...);
|
2011-11-01 00:11:33 +00:00
|
|
|
extern __printf(3, 4)
|
2013-07-01 12:12:37 +00:00
|
|
|
void __ext4_msg(struct super_block *, const char *, const char *, ...);
|
2011-05-24 22:31:25 +00:00
|
|
|
extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
|
|
|
|
const char *, unsigned int, const char *);
|
2011-11-01 00:11:33 +00:00
|
|
|
extern __printf(7, 8)
|
|
|
|
void __ext4_grp_locked_error(const char *, unsigned int,
|
|
|
|
struct super_block *, ext4_group_t,
|
|
|
|
unsigned long, ext4_fsblk_t,
|
|
|
|
const char *, ...);
|
2013-07-01 12:12:37 +00:00
|
|
|
|
2015-06-15 18:50:26 +00:00
|
|
|
/* Convenience error wrappers that capture __func__/__LINE__ automatically. */
#define EXT4_ERROR_INODE(inode, fmt, a...) \
	ext4_error_inode((inode), __func__, __LINE__, 0, (fmt), ## a)

#define EXT4_ERROR_INODE_ERR(inode, err, fmt, a...)			\
	__ext4_error_inode((inode), __func__, __LINE__, 0, (err), (fmt), ## a)

/* Same as above but also records the block number involved. */
#define ext4_error_inode_block(inode, block, err, fmt, a...)		\
	__ext4_error_inode((inode), __func__, __LINE__, (block), (err),	\
			   (fmt), ## a)

#define EXT4_ERROR_FILE(file, block, fmt, a...)				\
	ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)

/* Like ext4_error_err() but additionally aborts the filesystem (the
 * 'true' argument forces the abort path in __ext4_error()). */
#define ext4_abort(sb, err, fmt, a...)					\
	__ext4_error((sb), __func__, __LINE__, true, (err), 0, (fmt), ## a)
|
|
|
|
|
2013-07-01 12:12:37 +00:00
|
|
|
#ifdef CONFIG_PRINTK

/* With printk available, forward everything to the __ext4_* reporting
 * helpers, supplying the caller's location where it isn't passed in. */
#define ext4_error_inode(inode, func, line, block, fmt, ...) \
	__ext4_error_inode(inode, func, line, block, 0, fmt, ##__VA_ARGS__)
#define ext4_error_inode_err(inode, func, line, block, err, fmt, ...) \
	__ext4_error_inode((inode), (func), (line), (block), \
			   (err), (fmt), ##__VA_ARGS__)
#define ext4_error_file(file, func, line, block, fmt, ...) \
	__ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__)
#define ext4_error(sb, fmt, ...) \
	__ext4_error((sb), __func__, __LINE__, false, 0, 0, (fmt), \
		##__VA_ARGS__)
#define ext4_error_err(sb, err, fmt, ...) \
	__ext4_error((sb), __func__, __LINE__, false, (err), 0, (fmt), \
		##__VA_ARGS__)
#define ext4_warning(sb, fmt, ...) \
	__ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
#define ext4_warning_inode(inode, fmt, ...) \
	__ext4_warning_inode(inode, __func__, __LINE__, fmt, ##__VA_ARGS__)
#define ext4_msg(sb, level, fmt, ...) \
	__ext4_msg(sb, level, fmt, ##__VA_ARGS__)
#define dump_mmp_msg(sb, mmp, msg) \
	__dump_mmp_msg(sb, mmp, __func__, __LINE__, msg)
#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \
	__ext4_grp_locked_error(__func__, __LINE__, sb, grp, ino, block, \
				fmt, ##__VA_ARGS__)

#else

/* Without printk, still run the error/abort machinery, but report an
 * empty message.  no_printk() keeps compile-time format checking of
 * the (otherwise unused) fmt arguments. */
#define ext4_error_inode(inode, func, line, block, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_error_inode(inode, "", 0, block, 0, " "); \
} while (0)
#define ext4_error_inode_err(inode, func, line, block, err, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_error_inode(inode, "", 0, block, err, " "); \
} while (0)
#define ext4_error_file(file, func, line, block, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_error_file(file, "", 0, block, " "); \
} while (0)
#define ext4_error(sb, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_error(sb, "", 0, false, 0, 0, " "); \
} while (0)
#define ext4_error_err(sb, err, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_error(sb, "", 0, false, err, 0, " "); \
} while (0)
#define ext4_warning(sb, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_warning(sb, "", 0, " "); \
} while (0)
#define ext4_warning_inode(inode, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_warning_inode(inode, "", 0, " "); \
} while (0)
#define ext4_msg(sb, level, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_msg(sb, "", " "); \
} while (0)
#define dump_mmp_msg(sb, mmp, msg) \
	__dump_mmp_msg(sb, mmp, "", 0, "")
#define ext4_grp_locked_error(sb, grp, ino, block, fmt, ...) \
do { \
	no_printk(fmt, ##__VA_ARGS__); \
	__ext4_grp_locked_error("", 0, sb, grp, ino, block, " "); \
} while (0)

#endif
|
|
|
|
|
2006-10-11 08:21:15 +00:00
|
|
|
extern ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg);
|
|
|
|
extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg);
|
|
|
|
extern ext4_fsblk_t ext4_inode_table(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg);
|
2011-09-09 23:08:51 +00:00
|
|
|
extern __u32 ext4_free_group_clusters(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg);
|
2009-01-06 03:20:24 +00:00
|
|
|
extern __u32 ext4_free_inodes_count(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg);
|
|
|
|
extern __u32 ext4_used_dirs_count(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg);
|
|
|
|
extern __u32 ext4_itable_unused_count(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg);
|
2006-10-11 08:21:15 +00:00
|
|
|
extern void ext4_block_bitmap_set(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg, ext4_fsblk_t blk);
|
|
|
|
extern void ext4_inode_bitmap_set(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg, ext4_fsblk_t blk);
|
|
|
|
extern void ext4_inode_table_set(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg, ext4_fsblk_t blk);
|
2011-09-09 23:08:51 +00:00
|
|
|
extern void ext4_free_group_clusters_set(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg,
|
|
|
|
__u32 count);
|
2009-01-06 03:20:24 +00:00
|
|
|
extern void ext4_free_inodes_set(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg, __u32 count);
|
|
|
|
extern void ext4_used_dirs_set(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg, __u32 count);
|
|
|
|
extern void ext4_itable_unused_set(struct super_block *sb,
|
|
|
|
struct ext4_group_desc *bg, __u32 count);
|
2012-04-29 22:45:10 +00:00
|
|
|
extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group,
|
2009-05-01 23:44:44 +00:00
|
|
|
struct ext4_group_desc *gdp);
|
2012-04-29 22:45:10 +00:00
|
|
|
extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
|
|
|
|
struct ext4_group_desc *gdp);
|
2013-01-13 13:41:45 +00:00
|
|
|
extern int ext4_register_li_request(struct super_block *sb,
|
|
|
|
ext4_group_t first_not_zeroed);
|
2006-10-11 08:21:10 +00:00
|
|
|
|
2014-10-13 07:36:16 +00:00
|
|
|
static inline int ext4_has_metadata_csum(struct super_block *sb)
|
|
|
|
{
|
2015-10-17 20:18:43 +00:00
|
|
|
WARN_ON_ONCE(ext4_has_feature_metadata_csum(sb) &&
|
2014-10-13 07:36:16 +00:00
|
|
|
!EXT4_SB(sb)->s_chksum_driver);
|
|
|
|
|
2017-06-22 15:44:55 +00:00
|
|
|
return ext4_has_feature_metadata_csum(sb) &&
|
|
|
|
(EXT4_SB(sb)->s_chksum_driver != NULL);
|
2014-10-13 07:36:16 +00:00
|
|
|
}
|
2017-06-22 15:44:55 +00:00
|
|
|
|
|
|
|
/*
 * Group descriptors carry a checksum when either the legacy gdt_csum
 * feature or the full metadata_csum feature is in effect.
 */
static inline int ext4_has_group_desc_csum(struct super_block *sb)
{
	if (ext4_has_feature_gdt_csum(sb))
		return 1;

	return ext4_has_metadata_csum(sb);
}
|
|
|
|
|
2020-08-25 15:00:16 +00:00
|
|
|
/*
 * Read a 64-bit superblock field that is stored as _lo/_hi halves.
 * The _hi half is only meaningful when the 64BIT incompat feature is
 * enabled; otherwise only the low 32 bits are returned.
 * NOTE: 'es' is evaluated more than once -- pass a plain pointer.
 */
#define ext4_read_incompat_64bit_val(es, name) \
	(((es)->s_feature_incompat & cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT) \
		? (ext4_fsblk_t)le32_to_cpu(es->name##_hi) << 32 : 0) | \
		le32_to_cpu(es->name##_lo))
|
|
|
|
|
2006-10-11 08:21:10 +00:00
|
|
|
/* Total number of blocks in the filesystem (64-bit safe). */
static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
{
	return ext4_read_incompat_64bit_val(es, s_blocks_count);
}
|
|
|
|
|
|
|
|
/* Number of blocks reserved for the super-user (64-bit safe). */
static inline ext4_fsblk_t ext4_r_blocks_count(struct ext4_super_block *es)
{
	return ext4_read_incompat_64bit_val(es, s_r_blocks_count);
}
|
|
|
|
|
|
|
|
/* Number of free blocks recorded in the superblock (64-bit safe). */
static inline ext4_fsblk_t ext4_free_blocks_count(struct ext4_super_block *es)
{
	return ext4_read_incompat_64bit_val(es, s_free_blocks_count);
}
|
|
|
|
|
|
|
|
static inline void ext4_blocks_count_set(struct ext4_super_block *es,
|
|
|
|
ext4_fsblk_t blk)
|
|
|
|
{
|
2007-10-16 22:38:25 +00:00
|
|
|
es->s_blocks_count_lo = cpu_to_le32((u32)blk);
|
2006-10-11 08:21:10 +00:00
|
|
|
es->s_blocks_count_hi = cpu_to_le32(blk >> 32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ext4_free_blocks_count_set(struct ext4_super_block *es,
|
|
|
|
ext4_fsblk_t blk)
|
|
|
|
{
|
2007-10-16 22:38:25 +00:00
|
|
|
es->s_free_blocks_count_lo = cpu_to_le32((u32)blk);
|
2006-10-11 08:21:10 +00:00
|
|
|
es->s_free_blocks_count_hi = cpu_to_le32(blk >> 32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
|
|
|
|
ext4_fsblk_t blk)
|
|
|
|
{
|
2007-10-16 22:38:25 +00:00
|
|
|
es->s_r_blocks_count_lo = cpu_to_le32((u32)blk);
|
2006-10-11 08:21:10 +00:00
|
|
|
es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
|
|
|
|
}
|
|
|
|
|
2017-06-22 01:09:57 +00:00
|
|
|
static inline loff_t ext4_isize(struct super_block *sb,
|
|
|
|
struct ext4_inode *raw_inode)
|
2008-01-29 04:58:27 +00:00
|
|
|
{
|
2017-06-22 01:09:57 +00:00
|
|
|
if (ext4_has_feature_largedir(sb) ||
|
|
|
|
S_ISREG(le16_to_cpu(raw_inode->i_mode)))
|
2009-01-17 23:41:37 +00:00
|
|
|
return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
|
|
|
|
le32_to_cpu(raw_inode->i_size_lo);
|
2017-06-22 01:09:57 +00:00
|
|
|
|
|
|
|
return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
|
2008-01-29 04:58:27 +00:00
|
|
|
}
|
2006-10-11 08:21:10 +00:00
|
|
|
|
2008-01-29 04:58:27 +00:00
|
|
|
/* Encode i_size into the split on-disk _lo/_high fields. */
static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
{
	raw_inode->i_size_high = cpu_to_le32(i_size >> 32);
	raw_inode->i_size_lo = cpu_to_le32(i_size);
}
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2009-05-01 12:50:38 +00:00
|
|
|
/*
 * Reading s_groups_count requires using smp_rmb() afterwards.  See
 * the locking protocol documented in the comments of ext4_group_add()
 * in resize.c
 */
static inline ext4_group_t ext4_get_groups_count(struct super_block *sb)
{
	ext4_group_t	ngroups = EXT4_SB(sb)->s_groups_count;

	/* Pairs with the write-side barrier in resize: make sure group
	 * data published before the count was bumped is visible before
	 * the caller acts on ngroups. */
	smp_rmb();
	return ngroups;
}
|
2008-01-29 05:19:52 +00:00
|
|
|
|
2008-07-11 23:27:31 +00:00
|
|
|
/* Map a block group number to the flex group it belongs to. */
static inline ext4_group_t ext4_flex_group(struct ext4_sb_info *sbi,
					   ext4_group_t block_group)
{
	unsigned int shift = sbi->s_log_groups_per_flex;

	return block_group >> shift;
}
|
|
|
|
|
|
|
|
/* Number of block groups per flex group (always a power of two). */
static inline unsigned int ext4_flex_bg_size(struct ext4_sb_info *sbi)
{
	return 1 << sbi->s_log_groups_per_flex;
}
|
|
|
|
|
2006-10-11 08:20:53 +00:00
|
|
|
/*
 * Report 'errno' through the standard error path unless it is zero;
 * records the call site via __func__/__LINE__.
 */
#define ext4_std_error(sb, errno)				\
do {								\
	if ((errno))						\
		__ext4_std_error((sb), __func__, __LINE__, (errno));	\
} while (0)
|
|
|
|
|
2008-10-09 14:56:23 +00:00
|
|
|
#ifdef CONFIG_SMP
/* Each CPU can accumulate percpu_counter_batch clusters in their local
 * counters. So we need to make sure we have free clusters more
 * than percpu_counter_batch * nr_cpu_ids. Also add a window of 4 times.
 */
#define EXT4_FREECLUSTERS_WATERMARK (4 * (percpu_counter_batch * nr_cpu_ids))
#else
/* On UP there is no per-CPU counter drift, so no slack is needed. */
#define EXT4_FREECLUSTERS_WATERMARK 0
#endif
|
|
|
|
|
2022-01-21 07:06:11 +00:00
|
|
|
/* Update i_disksize. Requires i_rwsem to avoid races with truncate */
static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
{
	/* For regular files the caller must already hold i_rwsem. */
	WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
		     !inode_is_locked(inode));
	down_write(&EXT4_I(inode)->i_data_sem);
	/* i_disksize only grows here; WRITE_ONCE pairs with lockless
	 * readers of i_disksize elsewhere. */
	if (newsize > EXT4_I(inode)->i_disksize)
		WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
	up_write(&EXT4_I(inode)->i_data_sem);
}
|
|
|
|
|
2022-01-21 07:06:11 +00:00
|
|
|
/* Update i_size, i_disksize. Requires i_rwsem to avoid races with truncate */
|
2014-08-23 21:48:28 +00:00
|
|
|
static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
|
|
|
|
{
|
|
|
|
int changed = 0;
|
|
|
|
|
|
|
|
if (newsize > inode->i_size) {
|
|
|
|
i_size_write(inode, newsize);
|
|
|
|
changed = 1;
|
|
|
|
}
|
|
|
|
if (newsize > EXT4_I(inode)->i_disksize) {
|
|
|
|
ext4_update_i_disksize(inode, newsize);
|
|
|
|
changed |= 2;
|
|
|
|
}
|
|
|
|
return changed;
|
|
|
|
}
|
|
|
|
|
2015-12-07 19:34:49 +00:00
|
|
|
int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
|
|
|
|
loff_t len);
|
|
|
|
|
2009-01-06 03:19:52 +00:00
|
|
|
/*
 * Per-block-group in-core state used by the multi-block allocator.
 * Field order is load-bearing (bb_counters[] is a flexible array member);
 * do not reorder.
 */
struct ext4_group_info {
	unsigned long bb_state;		/* EXT4_GROUP_INFO_*_BIT flags */
#ifdef AGGRESSIVE_CHECK
	unsigned long bb_check_counter;
#endif
	struct rb_root bb_free_root;
	ext4_grpblk_t bb_first_free;	/* first free block */
	ext4_grpblk_t bb_free;	/* total free blocks */
	ext4_grpblk_t bb_fragments;	/* nr of freespace fragments */
	int		bb_avg_fragment_size_order;	/* order of average
							   fragment in BG */
	ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
	ext4_group_t bb_group;	/* Group number */
	struct list_head bb_prealloc_list;
#ifdef DOUBLE_CHECK
	void *bb_bitmap;
#endif
	struct rw_semaphore alloc_sem;
	/* Node in sbi list ordered by average fragment size (mb_optimize_scan) */
	struct list_head bb_avg_fragment_size_node;
	/* Node in sbi list ordered by largest free order (mb_optimize_scan) */
	struct list_head bb_largest_free_order_node;
	ext4_grpblk_t bb_counters[];	/* Nr of free power-of-two-block
					 * regions, index is order.
					 * bb_counters[3] = 5 means
					 * 5 free 8-block regions. */
};
|
|
|
|
|
2011-07-11 04:03:38 +00:00
|
|
|
#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
|
|
|
|
#define EXT4_GROUP_INFO_WAS_TRIMMED_BIT 1
|
ext4: mark block group as corrupt on block bitmap error
When we notice a block-bitmap corruption (because of device failure or
something else), we should mark this group as corrupt and prevent
further block allocations/deallocations from it. Currently, we end up
generating one error message for every block in the bitmap. This
potentially could make the system unstable as noticed in some
bugs. With this patch, the error will be printed only the first time
and mark the entire block group as corrupted. This prevents future
access allocations/deallocations from it.
Also tested by corrupting the block
bitmap and forcefully introducing the mb_free_blocks error:
(1) create a largefile (2Gb)
$ dd if=/dev/zero of=largefile oflag=direct bs=10485760 count=200
(2) umount filesystem. use dumpe2fs to see which block-bitmaps
are in use by largefile and note their block numbers
(3) use dd to zero-out the used block bitmaps
$ dd if=/dev/zero of=/dev/hdc4 bs=4096 seek=14 count=8 oflag=direct
(4) mount the FS and delete the largefile.
(5) recreate the largefile. verify that the new largefile does not
get any blocks from the groups marked as bad.
Without the patch, we will see mb_free_blocks error for each bit in
each zero'ed out bitmap at (4). With the patch, we only see the error
once per blockgroup:
[ 309.706803] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 15: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.720824] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 14: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.732858] EXT4-fs error (device sdb4) in ext4_free_blocks:4802: IO failure
[ 309.748321] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 13: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.760331] EXT4-fs error (device sdb4) in ext4_free_blocks:4802: IO failure
[ 309.769695] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 12: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.781721] EXT4-fs error (device sdb4) in ext4_free_blocks:4802: IO failure
[ 309.798166] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 11: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.810184] EXT4-fs error (device sdb4) in ext4_free_blocks:4802: IO failure
[ 309.819532] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 10: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
Google-Bug-Id: 7258357
[darrick.wong@oracle.com]
Further modifications (by Darrick) to make more obvious that this corruption
bit applies to blocks only. Set the corruption flag if the block group bitmap
verification fails.
Original-author: Aditya Kali <adityakali@google.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2013-08-28 21:35:51 +00:00
|
|
|
#define EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT 2
|
2013-08-28 22:32:58 +00:00
|
|
|
#define EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT 3
|
2018-05-12 15:39:40 +00:00
|
|
|
#define EXT4_GROUP_INFO_BBITMAP_CORRUPT \
|
|
|
|
(1 << EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT)
|
|
|
|
#define EXT4_GROUP_INFO_IBITMAP_CORRUPT \
|
|
|
|
(1 << EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT)
|
2020-04-21 07:54:07 +00:00
|
|
|
#define EXT4_GROUP_INFO_BBITMAP_READ_BIT 4
|
2009-01-06 03:19:52 +00:00
|
|
|
|
|
|
|
#define EXT4_MB_GRP_NEED_INIT(grp) \
|
|
|
|
(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
|
ext4: mark block group as corrupt on block bitmap error
When we notice a block-bitmap corruption (because of device failure or
something else), we should mark this group as corrupt and prevent
further block allocations/deallocations from it. Currently, we end up
generating one error message for every block in the bitmap. This
potentially could make the system unstable as noticed in some
bugs. With this patch, the error will be printed only the first time
and mark the entire block group as corrupted. This prevents future
access allocations/deallocations from it.
Also tested by corrupting the block
bitmap and forcefully introducing the mb_free_blocks error:
(1) create a largefile (2Gb)
$ dd if=/dev/zero of=largefile oflag=direct bs=10485760 count=200
(2) umount filesystem. use dumpe2fs to see which block-bitmaps
are in use by largefile and note their block numbers
(3) use dd to zero-out the used block bitmaps
$ dd if=/dev/zero of=/dev/hdc4 bs=4096 seek=14 count=8 oflag=direct
(4) mount the FS and delete the largefile.
(5) recreate the largefile. verify that the new largefile does not
get any blocks from the groups marked as bad.
Without the patch, we will see mb_free_blocks error for each bit in
each zero'ed out bitmap at (4). With the patch, we only see the error
once per blockgroup:
[ 309.706803] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 15: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.720824] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 14: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.732858] EXT4-fs error (device sdb4) in ext4_free_blocks:4802: IO failure
[ 309.748321] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 13: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.760331] EXT4-fs error (device sdb4) in ext4_free_blocks:4802: IO failure
[ 309.769695] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 12: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.781721] EXT4-fs error (device sdb4) in ext4_free_blocks:4802: IO failure
[ 309.798166] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 11: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
[ 309.810184] EXT4-fs error (device sdb4) in ext4_free_blocks:4802: IO failure
[ 309.819532] EXT4-fs error (device sdb4): ext4_mb_generate_buddy:735: group 10: 32768 clusters in bitmap, 0 in gd. blk grp corrupted.
Google-Bug-Id: 7258357
[darrick.wong@oracle.com]
Further modifications (by Darrick) to make more obvious that this corruption
bit applies to blocks only. Set the corruption flag if the block group bitmap
verification fails.
Original-author: Aditya Kali <adityakali@google.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2013-08-28 21:35:51 +00:00
|
|
|
#define EXT4_MB_GRP_BBITMAP_CORRUPT(grp) \
|
|
|
|
(test_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &((grp)->bb_state)))
|
2013-08-28 22:32:58 +00:00
|
|
|
#define EXT4_MB_GRP_IBITMAP_CORRUPT(grp) \
|
|
|
|
(test_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &((grp)->bb_state)))
|
2009-01-06 03:19:52 +00:00
|
|
|
|
2011-07-11 04:03:38 +00:00
|
|
|
#define EXT4_MB_GRP_WAS_TRIMMED(grp) \
|
|
|
|
(test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
|
|
|
|
#define EXT4_MB_GRP_SET_TRIMMED(grp) \
|
|
|
|
(set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
|
|
|
|
#define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \
|
|
|
|
(clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
|
2020-04-21 07:54:07 +00:00
|
|
|
#define EXT4_MB_GRP_TEST_AND_SET_READ(grp) \
|
|
|
|
(test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_READ_BIT, &((grp)->bb_state)))
|
2011-07-11 04:03:38 +00:00
|
|
|
|
ext4: Avoid group preallocation for closed files
Currently the group preallocation code tries to find a large (512)
free block from which to do per-cpu group allocation for small files.
The problem with this scheme is that it leaves the filesystem horribly
fragmented. In the worst case, if the filesystem is unmounted and
remounted (after a system shutdown, for example) we forget the fact
that we were using a particular (now-partially filled) 512 block
extent. So the next time we try to allocate space for a small file,
we will find *another* completely free 512 block chunk to allocate
small files. Given that there are 32,768 blocks in a block group,
after 64 iterations of "mount, write one 4k file in a directory,
unmount", the block group will have 64 files, each separated by 511
blocks, and the block group will no longer have any free 512
completely free chunks of blocks for group preallocation space.
So if we try to allocate blocks for a file that has been closed, such
that we know the final size of the file, and the filesystem is not
busy, avoid using group preallocation.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2009-09-18 17:34:02 +00:00
|
|
|
#define EXT4_MAX_CONTENTION 8
|
|
|
|
#define EXT4_CONTENTION_THRESHOLD 2
|
|
|
|
|
2009-05-03 00:35:09 +00:00
|
|
|
static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
|
|
|
|
ext4_group_t group)
|
2009-01-06 03:19:52 +00:00
|
|
|
{
|
2009-05-03 00:35:09 +00:00
|
|
|
return bgl_lock_ptr(EXT4_SB(sb)->s_blockgroup_lock, group);
|
|
|
|
}
|
2009-01-06 03:19:52 +00:00
|
|
|
|
ext4: Avoid group preallocation for closed files
Currently the group preallocation code tries to find a large (512)
free block from which to do per-cpu group allocation for small files.
The problem with this scheme is that it leaves the filesystem horribly
fragmented. In the worst case, if the filesystem is unmounted and
remounted (after a system shutdown, for example) we forget the fact
that we were using a particular (now-partially filled) 512 block
extent. So the next time we try to allocate space for a small file,
we will find *another* completely free 512 block chunk to allocate
small files. Given that there are 32,768 blocks in a block group,
after 64 iterations of "mount, write one 4k file in a directory,
unmount", the block group will have 64 files, each separated by 511
blocks, and the block group will no longer have any free 512
completely free chunks of blocks for group preallocation space.
So if we try to allocate blocks for a file that has been closed, such
that we know the final size of the file, and the filesystem is not
busy, avoid using group preallocation.
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2009-09-18 17:34:02 +00:00
|
|
|
/*
|
|
|
|
* Returns true if the filesystem is busy enough that attempts to
|
|
|
|
* access the block group locks has run into contention.
|
|
|
|
*/
|
|
|
|
static inline int ext4_fs_is_busy(struct ext4_sb_info *sbi)
|
|
|
|
{
|
|
|
|
return (atomic_read(&sbi->s_lock_busy) > EXT4_CONTENTION_THRESHOLD);
|
|
|
|
}
|
|
|
|
|
2009-05-03 00:35:09 +00:00
|
|
|
static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
{
	spinlock_t *lock = ext4_group_lock_ptr(sb, group);

	if (!spin_trylock(lock)) {
		/*
		 * Contended: bump the contention counter (saturating at
		 * EXT4_MAX_CONTENTION), then block on the spin lock.
		 */
		atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, 1,
				  EXT4_MAX_CONTENTION);
		spin_lock(lock);
		return;
	}
	/*
	 * Got the lock right away, so decay the contention counter
	 * (floored at zero).
	 */
	atomic_add_unless(&EXT4_SB(sb)->s_lock_busy, -1, 0);
}
|
|
|
|
|
|
|
|
static inline void ext4_unlock_group(struct super_block *sb,
|
|
|
|
ext4_group_t group)
|
|
|
|
{
|
2009-05-03 00:35:09 +00:00
|
|
|
spin_unlock(ext4_group_lock_ptr(sb, group));
|
2009-01-06 03:19:52 +00:00
|
|
|
}
|
|
|
|
|
2020-10-22 03:20:59 +00:00
|
|
|
#ifdef CONFIG_QUOTA
|
|
|
|
static inline bool ext4_quota_capable(struct super_block *sb)
|
|
|
|
{
|
|
|
|
return (test_opt(sb, QUOTA) || ext4_has_feature_quota(sb));
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool ext4_is_quota_journalled(struct super_block *sb)
|
|
|
|
{
|
|
|
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
|
|
|
|
|
|
|
return (ext4_has_feature_quota(sb) ||
|
|
|
|
sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]);
|
|
|
|
}
|
2021-08-16 09:57:05 +00:00
|
|
|
int ext4_enable_quotas(struct super_block *sb);
|
2020-10-22 03:20:59 +00:00
|
|
|
#endif
|
|
|
|
|
2011-06-27 23:16:02 +00:00
|
|
|
/*
|
|
|
|
* Block validity checking
|
|
|
|
*/
|
|
|
|
#define ext4_check_indirect_blockref(inode, bh) \
|
|
|
|
ext4_check_blockref(__func__, __LINE__, inode, \
|
|
|
|
(__le32 *)(bh)->b_data, \
|
|
|
|
EXT4_ADDR_PER_BLOCK((inode)->i_sb))
|
|
|
|
|
|
|
|
#define ext4_ind_check_inode(inode) \
|
|
|
|
ext4_check_blockref(__func__, __LINE__, inode, \
|
|
|
|
EXT4_I(inode)->i_data, \
|
|
|
|
EXT4_NDIR_BLOCKS)
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/*
|
|
|
|
* Inodes and files operations
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* dir.c */
|
2006-10-11 08:20:53 +00:00
|
|
|
extern const struct file_operations ext4_dir_operations;
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/* file.c */
|
2007-02-12 08:55:40 +00:00
|
|
|
extern const struct inode_operations ext4_file_inode_operations;
|
2006-10-11 08:20:53 +00:00
|
|
|
extern const struct file_operations ext4_file_operations;
|
2010-10-28 01:30:06 +00:00
|
|
|
extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2013-02-09 20:23:03 +00:00
|
|
|
/* inline.c */
|
|
|
|
extern int ext4_get_max_inline_size(struct inode *inode);
|
|
|
|
extern int ext4_find_inline_data_nolock(struct inode *inode);
|
|
|
|
extern int ext4_destroy_inline_data(handle_t *handle, struct inode *inode);
|
|
|
|
|
2023-03-24 18:01:09 +00:00
|
|
|
int ext4_readpage_inline(struct inode *inode, struct folio *folio);
|
2013-02-09 20:23:03 +00:00
|
|
|
extern int ext4_try_to_write_inline_data(struct address_space *mapping,
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t pos, unsigned len,
|
|
|
|
struct page **pagep);
|
2023-05-15 10:40:44 +00:00
|
|
|
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
|
|
|
|
unsigned copied, struct folio *folio);
|
2013-02-09 20:23:03 +00:00
|
|
|
extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
|
|
|
|
struct inode *inode,
|
|
|
|
loff_t pos, unsigned len,
|
|
|
|
struct page **pagep,
|
|
|
|
void **fsdata);
|
2015-05-18 17:14:47 +00:00
|
|
|
extern int ext4_try_add_inline_entry(handle_t *handle,
|
|
|
|
struct ext4_filename *fname,
|
2016-01-08 21:00:31 +00:00
|
|
|
struct inode *dir, struct inode *inode);
|
2013-02-09 20:23:03 +00:00
|
|
|
extern int ext4_try_create_inline_dir(handle_t *handle,
|
|
|
|
struct inode *parent,
|
|
|
|
struct inode *inode);
|
|
|
|
extern int ext4_read_inline_dir(struct file *filp,
|
2013-05-17 20:08:53 +00:00
|
|
|
struct dir_context *ctx,
|
2013-02-09 20:23:03 +00:00
|
|
|
int *has_inline_data);
|
2019-06-22 01:57:00 +00:00
|
|
|
extern int ext4_inlinedir_to_tree(struct file *dir_file,
|
|
|
|
struct inode *dir, ext4_lblk_t block,
|
|
|
|
struct dx_hash_info *hinfo,
|
|
|
|
__u32 start_hash, __u32 start_minor_hash,
|
|
|
|
int *has_inline_data);
|
2013-02-09 20:23:03 +00:00
|
|
|
extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
|
2015-05-18 17:14:47 +00:00
|
|
|
struct ext4_filename *fname,
|
2013-02-09 20:23:03 +00:00
|
|
|
struct ext4_dir_entry_2 **res_dir,
|
|
|
|
int *has_inline_data);
|
|
|
|
extern int ext4_delete_inline_entry(handle_t *handle,
|
|
|
|
struct inode *dir,
|
|
|
|
struct ext4_dir_entry_2 *de_del,
|
|
|
|
struct buffer_head *bh,
|
|
|
|
int *has_inline_data);
|
2016-07-10 18:01:03 +00:00
|
|
|
extern bool empty_inline_dir(struct inode *dir, int *has_inline_data);
|
2013-02-09 20:23:03 +00:00
|
|
|
extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
|
|
|
|
struct ext4_dir_entry_2 **parent_de,
|
|
|
|
int *retval);
|
2022-06-30 09:01:00 +00:00
|
|
|
extern void *ext4_read_inline_link(struct inode *inode);
|
2017-10-01 21:57:54 +00:00
|
|
|
|
|
|
|
struct iomap;
|
|
|
|
extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
|
|
|
|
|
2017-01-23 00:35:49 +00:00
|
|
|
extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline);
|
2013-02-09 20:23:03 +00:00
|
|
|
|
|
|
|
extern int ext4_convert_inline_data(struct inode *inode);
|
|
|
|
|
2014-07-15 14:10:04 +00:00
|
|
|
static inline int ext4_has_inline_data(struct inode *inode)
|
|
|
|
{
|
|
|
|
return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
|
|
|
|
EXT4_I(inode)->i_inline_off;
|
|
|
|
}
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
/* namei.c */
|
2007-02-12 08:55:40 +00:00
|
|
|
extern const struct inode_operations ext4_dir_inode_operations;
|
|
|
|
extern const struct inode_operations ext4_special_inode_operations;
|
2009-05-01 17:49:15 +00:00
|
|
|
extern struct dentry *ext4_get_parent(struct dentry *child);
|
2012-12-10 19:05:57 +00:00
|
|
|
extern struct ext4_dir_entry_2 *ext4_init_dot_dotdot(struct inode *inode,
|
|
|
|
struct ext4_dir_entry_2 *de,
|
|
|
|
int blocksize, int csum_size,
|
|
|
|
unsigned int parent_ino, int dotdot_real_len);
|
2019-06-21 20:31:47 +00:00
|
|
|
extern void ext4_initialize_dirent_tail(struct buffer_head *bh,
|
|
|
|
unsigned int blocksize);
|
2019-06-21 19:49:26 +00:00
|
|
|
extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode,
|
|
|
|
struct buffer_head *bh);
|
2022-11-06 22:48:36 +00:00
|
|
|
extern int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
|
|
|
|
struct inode *inode, struct dentry *dentry);
|
2020-10-15 20:37:59 +00:00
|
|
|
extern int __ext4_link(struct inode *dir, struct inode *inode,
|
|
|
|
struct dentry *dentry);
|
ext4: Support case-insensitive file name lookups
This patch implements the actual support for case-insensitive file name
lookups in ext4, based on the feature bit and the encoding stored in the
superblock.
A filesystem that has the casefold feature set is able to configure
directories with the +F (EXT4_CASEFOLD_FL) attribute, enabling lookups
to succeed in that directory in a case-insensitive fashion, i.e: match
a directory entry even if the name used by userspace is not a byte per
byte match with the disk name, but is an equivalent case-insensitive
version of the Unicode string. This operation is called a
case-insensitive file name lookup.
The feature is configured as an inode attribute applied to directories
and inherited by its children. This attribute can only be enabled on
empty directories for filesystems that support the encoding feature,
thus preventing collision of file names that only differ by case.
* dcache handling:
For a +F directory, Ext4 only stores the first equivalent name dentry
used in the dcache. This is done to prevent unintentional duplication of
dentries in the dcache, while also allowing the VFS code to quickly find
the right entry in the cache despite which equivalent string was used in
a previous lookup, without having to resort to ->lookup().
d_hash() of casefolded directories is implemented as the hash of the
casefolded string, such that we always have a well-known bucket for all
the equivalencies of the same string. d_compare() uses the
utf8_strncasecmp() infrastructure, which handles the comparison of
equivalent, same case, names as well.
For now, negative lookups are not inserted in the dcache, since they
would need to be invalidated anyway, because we can't trust missing file
dentries. This is bad for performance but requires some leveraging of
the vfs layer to fix. We can live without that for now, and so does
everyone else.
* on-disk data:
Despite using a specific version of the name as the internal
representation within the dcache, the name stored and fetched from the
disk is a byte-per-byte match with what the user requested, making this
implementation 'name-preserving'. i.e. no actual information is lost
when writing to storage.
DX is supported by modifying the hashes used in +F directories to make
them case/encoding-aware. The new disk hashes are calculated as the
hash of the full casefolded string, instead of the string directly.
This allows us to efficiently search for file names in the htree without
requiring the user to provide an exact name.
* Dealing with invalid sequences:
By default, when a invalid UTF-8 sequence is identified, ext4 will treat
it as an opaque byte sequence, ignoring the encoding and reverting to
the old behavior for that unique file. This means that case-insensitive
file name lookup will not work only for that file. An optional bit can
be set in the superblock telling the filesystem code and userspace tools
to enforce the encoding. When that optional bit is set, any attempt to
create a file name using an invalid UTF-8 sequence will fail and return
an error to userspace.
* Normalization algorithm:
The UTF-8 algorithms used to compare strings in ext4 is implemented
lives in fs/unicode, and is based on a previous version developed by
SGI. It implements the Canonical decomposition (NFD) algorithm
described by the Unicode specification 12.1, or higher, combined with
the elimination of ignorable code points (NFDi) and full
case-folding (CF) as documented in fs/unicode/utf8_norm.c.
NFD seems to be the best normalization method for EXT4 because:
- It has a lower cost than NFC/NFKC (which requires
decomposing to NFD as an intermediary step)
- It doesn't eliminate important semantic meaning like
compatibility decompositions.
Although:
- This implementation is not completely linguistic accurate, because
different languages have conflicting rules, which would require the
specialization of the filesystem to a given locale, which brings all
sorts of problems for removable media and for users who use more than
one language.
Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.co.uk>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
2019-04-25 18:12:08 +00:00
|
|
|
|
2013-04-19 21:53:09 +00:00
|
|
|
/* Map (mode & S_IFMT) >> S_SHIFT to the on-disk directory-entry file type. */
#define S_SHIFT 12
static const unsigned char ext4_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
	[S_IFREG >> S_SHIFT]	= EXT4_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= EXT4_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= EXT4_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= EXT4_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= EXT4_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= EXT4_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= EXT4_FT_SYMLINK,
};
|
|
|
|
|
|
|
|
static inline void ext4_set_de_type(struct super_block *sb,
|
|
|
|
struct ext4_dir_entry_2 *de,
|
|
|
|
umode_t mode) {
|
2015-10-17 20:18:43 +00:00
|
|
|
if (ext4_has_feature_filetype(sb))
|
2013-04-19 21:53:09 +00:00
|
|
|
de->file_type = ext4_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
|
|
|
|
}
|
|
|
|
|
2015-04-08 04:00:32 +00:00
|
|
|
/* readpages.c */
|
2020-06-02 04:47:20 +00:00
|
|
|
extern int ext4_mpage_readpages(struct inode *inode,
|
2023-03-24 18:01:23 +00:00
|
|
|
struct readahead_control *rac, struct folio *folio);
|
2019-07-22 16:26:24 +00:00
|
|
|
extern int __init ext4_init_post_read_processing(void);
|
|
|
|
extern void ext4_exit_post_read_processing(void);
|
2006-10-11 08:20:50 +00:00
|
|
|
|
|
|
|
/* symlink.c */
|
2015-04-27 21:51:30 +00:00
|
|
|
extern const struct inode_operations ext4_encrypted_symlink_inode_operations;
|
2007-02-12 08:55:40 +00:00
|
|
|
extern const struct inode_operations ext4_symlink_inode_operations;
|
|
|
|
extern const struct inode_operations ext4_fast_symlink_inode_operations;
|
2006-10-11 08:20:50 +00:00
|
|
|
|
2015-09-23 16:44:17 +00:00
|
|
|
/* sysfs.c */
|
2021-06-11 14:02:08 +00:00
|
|
|
extern void ext4_notify_error_sysfs(struct ext4_sb_info *sbi);
|
2015-09-23 16:44:17 +00:00
|
|
|
extern int ext4_register_sysfs(struct super_block *sb);
|
2015-09-23 16:46:17 +00:00
|
|
|
extern void ext4_unregister_sysfs(struct super_block *sb);
|
2015-09-23 16:44:17 +00:00
|
|
|
extern int __init ext4_init_sysfs(void);
|
|
|
|
extern void ext4_exit_sysfs(void);
|
|
|
|
|
2009-05-17 19:38:01 +00:00
|
|
|
/* block_validity */
|
|
|
|
extern void ext4_release_system_zone(struct super_block *sb);
|
|
|
|
extern int ext4_setup_system_zone(struct super_block *sb);
|
2010-10-28 01:30:14 +00:00
|
|
|
extern int __init ext4_init_system_zone(void);
|
|
|
|
extern void ext4_exit_system_zone(void);
|
2020-07-28 13:04:34 +00:00
|
|
|
extern int ext4_inode_block_valid(struct inode *inode,
|
|
|
|
ext4_fsblk_t start_blk,
|
|
|
|
unsigned int count);
|
2011-06-27 23:16:02 +00:00
|
|
|
extern int ext4_check_blockref(const char *, unsigned int,
|
|
|
|
struct inode *, __le32 *, unsigned int);
|
2022-02-16 07:02:49 +00:00
|
|
|
extern int ext4_sb_block_valid(struct super_block *sb, struct inode *inode,
|
|
|
|
ext4_fsblk_t start_blk, unsigned int count);
|
|
|
|
|
2009-05-17 19:38:01 +00:00
|
|
|
|
2006-10-11 08:21:03 +00:00
|
|
|
/* extents.c */
|
2012-11-28 18:03:30 +00:00
|
|
|
struct ext4_ext_path;
|
|
|
|
struct ext4_extent;
|
|
|
|
|
2013-08-28 18:47:06 +00:00
|
|
|
/*
|
|
|
|
* Maximum number of logical blocks in a file; ext4_extent's ee_block is
|
|
|
|
* __le32.
|
|
|
|
*/
|
|
|
|
#define EXT_MAX_BLOCKS 0xffffffff
|
|
|
|
|
2020-04-27 01:34:37 +00:00
|
|
|
extern void ext4_ext_tree_init(handle_t *handle, struct inode *inode);
|
2013-06-04 17:01:11 +00:00
|
|
|
extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
|
2010-05-16 23:00:00 +00:00
|
|
|
extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
|
|
|
|
struct ext4_map_blocks *map, int flags);
|
2016-11-14 03:02:28 +00:00
|
|
|
extern int ext4_ext_truncate(handle_t *, struct inode *);
|
2013-04-03 16:45:17 +00:00
|
|
|
extern int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
|
|
|
|
ext4_lblk_t end);
|
2006-10-11 08:21:03 +00:00
|
|
|
extern void ext4_ext_init(struct super_block *);
|
|
|
|
extern void ext4_ext_release(struct super_block *);
|
2011-01-14 12:07:43 +00:00
|
|
|
extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
|
2007-07-18 01:42:41 +00:00
|
|
|
loff_t len);
|
2013-06-04 17:21:11 +00:00
|
|
|
extern int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
|
|
|
|
loff_t offset, ssize_t len);
|
2019-10-16 07:37:08 +00:00
|
|
|
extern int ext4_convert_unwritten_io_end_vec(handle_t *handle,
|
|
|
|
ext4_io_end_t *io_end);
|
2010-05-16 23:00:00 +00:00
|
|
|
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
|
|
|
|
struct ext4_map_blocks *map, int flags);
|
2012-11-28 18:03:30 +00:00
|
|
|
extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
|
|
|
|
int num,
|
|
|
|
struct ext4_ext_path *path);
|
|
|
|
extern int ext4_ext_insert_extent(handle_t *, struct inode *,
|
2014-09-01 18:37:09 +00:00
|
|
|
struct ext4_ext_path **,
|
2012-11-28 18:03:30 +00:00
|
|
|
struct ext4_extent *, int);
|
2014-09-01 18:43:09 +00:00
|
|
|
extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
|
|
|
|
struct ext4_ext_path **,
|
|
|
|
int flags);
|
2022-09-24 02:12:11 +00:00
|
|
|
extern void ext4_free_ext_path(struct ext4_ext_path *);
|
2012-11-28 18:03:30 +00:00
|
|
|
extern int ext4_ext_check_inode(struct inode *inode);
|
2014-08-31 03:52:19 +00:00
|
|
|
extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
|
2008-11-22 20:04:59 +00:00
|
|
|
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
|
|
|
__u64 start, __u64 len);
|
2019-08-11 20:32:41 +00:00
|
|
|
extern int ext4_get_es_cache(struct inode *inode,
|
|
|
|
struct fiemap_extent_info *fieinfo,
|
|
|
|
__u64 start, __u64 len);
|
2013-08-17 02:05:14 +00:00
|
|
|
extern int ext4_ext_precache(struct inode *inode);
|
2014-08-31 03:52:19 +00:00
|
|
|
extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
|
|
|
|
struct inode *inode2, ext4_lblk_t lblk1,
|
|
|
|
ext4_lblk_t lblk2, ext4_lblk_t count,
|
|
|
|
int mark_unwritten,int *err);
|
2018-10-01 18:19:37 +00:00
|
|
|
extern int ext4_clu_mapped(struct inode *inode, ext4_lblk_t lclu);
|
2019-11-05 16:44:16 +00:00
|
|
|
extern int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
|
2019-11-05 16:44:29 +00:00
|
|
|
int check_cred, int restart_cred,
|
|
|
|
int revoke_cred);
|
2020-10-15 20:37:59 +00:00
|
|
|
extern void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end);
|
|
|
|
extern int ext4_ext_replay_set_iblocks(struct inode *inode);
|
|
|
|
extern int ext4_ext_replay_update_ex(struct inode *inode, ext4_lblk_t start,
|
|
|
|
int len, int unwritten, ext4_fsblk_t pblk);
|
|
|
|
extern int ext4_ext_clear_bb(struct inode *inode);
|
2019-11-05 16:44:16 +00:00
|
|
|
|
2012-11-28 18:03:30 +00:00
|
|
|
|
2009-06-17 23:24:03 +00:00
|
|
|
/* move_extent.c */
|
2013-04-08 16:54:05 +00:00
|
|
|
extern void ext4_double_down_write_data_sem(struct inode *first,
|
|
|
|
struct inode *second);
|
|
|
|
extern void ext4_double_up_write_data_sem(struct inode *orig_inode,
|
|
|
|
struct inode *donor_inode);
|
2009-06-17 23:24:03 +00:00
|
|
|
extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
|
|
|
|
__u64 start_orig, __u64 start_donor,
|
|
|
|
__u64 len, __u64 *moved_len);
|
|
|
|
|
2010-10-28 01:30:10 +00:00
|
|
|
/* page-io.c */
|
2010-10-28 01:30:14 +00:00
|
|
|
extern int __init ext4_init_pageio(void);
|
|
|
|
extern void ext4_exit_pageio(void);
|
2010-10-28 01:30:10 +00:00
|
|
|
extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
|
2013-06-04 15:58:58 +00:00
|
|
|
extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
|
|
|
|
extern int ext4_put_io_end(ext4_io_end_t *io_end);
|
|
|
|
extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
|
|
|
|
extern void ext4_io_submit_init(struct ext4_io_submit *io,
|
|
|
|
struct writeback_control *wbc);
|
2013-06-04 18:21:02 +00:00
|
|
|
extern void ext4_end_io_rsv_work(struct work_struct *work);
|
2010-10-28 01:30:10 +00:00
|
|
|
extern void ext4_io_submit(struct ext4_io_submit *io);
|
2023-03-24 18:01:08 +00:00
|
|
|
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *page,
|
|
|
|
size_t len);
|
2019-10-16 07:37:10 +00:00
|
|
|
extern struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end);
|
|
|
|
extern struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end);
|
2008-11-22 20:04:59 +00:00
|
|
|
|
2011-05-24 22:31:25 +00:00
|
|
|
/* mmp.c */
|
|
|
|
extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
|
|
|
|
|
2021-04-30 18:50:46 +00:00
|
|
|
|
|
|
|
extern void ext4_stop_mmpd(struct ext4_sb_info *sbi);
|
|
|
|
|
2019-07-22 16:26:24 +00:00
|
|
|
/* verity.c */
|
|
|
|
extern const struct fsverity_operations ext4_verityops;
|
|
|
|
|
2021-08-16 09:57:05 +00:00
|
|
|
/* orphan.c */
|
|
|
|
extern int ext4_orphan_add(handle_t *, struct inode *);
|
|
|
|
extern int ext4_orphan_del(handle_t *, struct inode *);
|
|
|
|
extern void ext4_orphan_cleanup(struct super_block *sb,
|
|
|
|
struct ext4_super_block *es);
|
2021-08-16 09:57:06 +00:00
|
|
|
extern void ext4_release_orphan_info(struct super_block *sb);
|
|
|
|
extern int ext4_init_orphan_info(struct super_block *sb);
|
|
|
|
extern int ext4_orphan_file_empty(struct super_block *sb);
|
|
|
|
extern void ext4_orphan_file_block_trigger(
|
|
|
|
struct jbd2_buffer_trigger_type *triggers,
|
|
|
|
struct buffer_head *bh,
|
|
|
|
void *data, size_t size);
|
2021-08-16 09:57:05 +00:00
|
|
|
|
2009-01-06 02:49:55 +00:00
|
|
|
/*
|
2012-09-20 01:48:00 +00:00
|
|
|
* Add new method to test whether block and inode bitmaps are properly
|
2009-01-06 02:49:55 +00:00
|
|
|
* initialized. With uninit_bg reading the block from disk is not enough
|
|
|
|
* to mark the bitmap uptodate. We need to also zero-out the bitmap
|
|
|
|
*/
|
|
|
|
#define BH_BITMAP_UPTODATE BH_JBDPrivateStart
|
|
|
|
|
|
|
|
static inline int bitmap_uptodate(struct buffer_head *bh)
|
|
|
|
{
|
|
|
|
return (buffer_uptodate(bh) &&
|
|
|
|
test_bit(BH_BITMAP_UPTODATE, &(bh)->b_state));
|
|
|
|
}
|
|
|
|
static inline void set_bitmap_uptodate(struct buffer_head *bh)
|
|
|
|
{
|
|
|
|
set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
|
|
|
|
}
|
|
|
|
|
ext4: serialize unaligned asynchronous DIO
ext4 has a data corruption case when doing non-block-aligned
asynchronous direct IO into a sparse file, as demonstrated
by xfstest 240.
The root cause is that while ext4 preallocates space in the
hole, mappings of that space still look "new" and
dio_zero_block() will zero out the unwritten portions. When
more than one AIO thread is going, they both find this "new"
block and race to zero out their portion; this is uncoordinated
and causes data corruption.
Dave Chinner fixed this for xfs by simply serializing all
unaligned asynchronous direct IO. I've done the same here.
The difference is that we only wait on conversions, not all IO.
This is a very big hammer, and I'm not very pleased with
stuffing this into ext4_file_write(). But since ext4 is
DIO_LOCKING, we need to serialize it at this high level.
I tried to move this into ext4_ext_direct_IO, but by then
we have the i_mutex already, and we will wait on the
work queue to do conversions - which must also take the
i_mutex. So that won't work.
This was originally exposed by qemu-kvm installing to
a raw disk image with a normal sector-63 alignment. I've
tested a backport of this patch with qemu, and it does
avoid the corruption. It is also quite a lot slower
(14 min for package installs, vs. 8 min for well-aligned)
but I'll take slow correctness over fast corruption any day.
Mingming suggested that we can track outstanding
conversions, and wait on those so that non-sparse
files won't be affected, and I've implemented that here;
unaligned AIO to nonsparse files won't take a perf hit.
[tytso@mit.edu: Keep the mutex as a hashed array instead
of bloating the ext4 inode]
[tytso@mit.edu: Fix up namespace issues so that global
variables are protected with an "ext4_" prefix.]
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
2011-02-12 13:17:34 +00:00
|
|
|
/* For ioend & aio unwritten conversion wait queues */
|
|
|
|
#define EXT4_WQ_HASH_SZ 37
|
|
|
|
#define ext4_ioend_wq(v) (&ext4__ioend_wq[((unsigned long)(v)) %\
|
|
|
|
EXT4_WQ_HASH_SZ])
|
|
|
|
extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
|
|
|
|
|
2011-07-27 01:35:44 +00:00
|
|
|
extern int ext4_resize_begin(struct super_block *sb);
|
2022-06-29 04:00:26 +00:00
|
|
|
extern int ext4_resize_end(struct super_block *sb, bool update_backups);
|
2011-07-27 01:35:44 +00:00
|
|
|
|
2016-02-28 21:36:38 +00:00
|
|
|
static inline void ext4_set_io_unwritten_flag(struct inode *inode,
|
|
|
|
struct ext4_io_end *io_end)
|
|
|
|
{
|
|
|
|
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
|
|
|
|
io_end->flag |= EXT4_IO_END_UNWRITTEN;
|
|
|
|
atomic_inc(&EXT4_I(inode)->i_unwritten);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Drop EXT4_IO_END_UNWRITTEN from @io_end once its extent conversion is
 * finished.  If this was the inode's last outstanding conversion, wake
 * everyone sleeping on the inode's hashed ioend wait queue.
 */
static inline void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;

	if (!(io_end->flag & EXT4_IO_END_UNWRITTEN))
		return;
	io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
		wake_up_all(ext4_ioend_wq(inode));
}
|
|
|
|
|
2017-01-28 07:20:26 +00:00
|
|
|
extern const struct iomap_ops ext4_iomap_ops;
|
2019-12-18 17:44:33 +00:00
|
|
|
extern const struct iomap_ops ext4_iomap_overwrite_ops;
|
2019-11-05 12:03:31 +00:00
|
|
|
extern const struct iomap_ops ext4_iomap_report_ops;
|
2016-11-20 22:36:06 +00:00
|
|
|
|
2019-08-23 03:00:32 +00:00
|
|
|
/*
 * Like buffer_uptodate(), but a buffer that hit a write I/O error is
 * still considered uptodate: the write-out of its data failed, so the
 * in-memory copy is newer than (or equal to) whatever a fresh read of
 * the on-disk block would return — re-reading it would be pointless.
 */
static inline int ext4_buffer_uptodate(struct buffer_head *bh)
{
	if (buffer_write_io_error(bh))
		set_buffer_uptodate(bh);
	return buffer_uptodate(bh);
}
|
|
|
|
|
2006-10-11 08:20:50 +00:00
|
|
|
#endif /* __KERNEL__ */
|
|
|
|
|
2015-10-17 20:16:04 +00:00
|
|
|
#define EFSBADCRC EBADMSG /* Bad CRC detected */
|
|
|
|
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
|
|
|
|
|
2008-04-29 22:13:32 +00:00
|
|
|
#endif /* _EXT4_H */
|