for-6.7-tag

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmU/xAEACgkQxWXV+ddt
 WDvYKg//SjTimA5Nins9mb4jdz8n+dDeZnQhKzy3FqInU41EzDRc4WwnEODmDlTa
 AyU9rGB3k0JNSUc075jZFCyLqq/ARiOqRi4x33Gk0ckIlc4X5OgBoqP2XkPh0VlP
 txskLCrmhc3pwyR4ErlFDX2jebIUXfkv39bJuE40grGvUatRe+WNq0ERIrgO8RAr
 Rc3hBotMH8AIqfD1L6j1ZiZIAyrOkT1BJMuqeoq27/gJZn/MRhM9TCrMTzfWGaoW
 SxPrQiCDEN3KECsOY/caroMn3AekDijg/ley1Nf7Z0N6oEV+n4VWWPBFE9HhRz83
 9fIdvSbGjSJF6ekzTjcVXPAbcuKZFzeqOdBRMIW3TIUo7mZQyJTVkMsc1y/NL2Z3
 9DhlRLIzvWJJjt1CEK0u18n5IU+dGngdktbhWWIuIlo8r+G/iKR/7zqU92VfWLHL
 Z7/eh6HgH5zr2bm+yKORbrUjkv4IVhGVarW8D4aM+MCG0lFN2GaPcJCCUrp4n7rZ
 PzpQbxXa38ANBk6hsp4ndS8TJSBL9moY8tumzLcKg97nzNMV6KpBdV/G6/QfRLCN
 3kM6UbwTAkMwGcQS86Mqx6s04ORLnQeD6f7N6X4Ppx0Mi/zkjI2HkRuvQGp12B0v
 iZjCCZAYY2Iu+/TU0GrCXSss/grzIAUPzM9msyV3XGO/VBpwdec=
 =9TVx
 -----END PGP SIGNATURE-----

Merge tag 'for-6.7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "New features:

   - raid-stripe-tree

     New tree for logical file extent mapping where the physical mapping
     may not match on multiple devices. This is now used in zoned mode
     to implement RAID0/RAID1* profiles, but can be used in non-zoned
     mode as well. The support for RAID56 is in development and will
     eventually fix the problems with the current implementation. This
     is a backward incompatible feature and has to be enabled at mkfs
     time.

   - simple quota accounting (squota)

     A simplified mode of qgroups that accounts all space to the
     initial extent owner (a subvolume); snapshots are then cheap to
     create and delete. Deleting snapshots under fully accounting
     qgroups is a known CPU/IO performance bottleneck.

     Squota is not suitable for the general use case but works well
     for containers, where the original subvolume exists for the whole
     time. This is a backward incompatible feature as it requires
     extending some structures, but it can be enabled on an existing
     filesystem.

   - temporary filesystem fsid (temp_fsid)

     The fsid identifies a filesystem and is hard-coded in the
     structures, which prevents mounting the same fsid found on
     different devices.

     For a single-device filesystem this is not strictly necessary; a
     new temporary fsid can be generated on mount, e.g. after a device
     is cloned. This will be used by Steam Deck for root partition A/B
     testing, or can be used for VM root images.
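
     A rough sketch of the temp_fsid idea (a hedged illustration, not
     the actual mount code; generate_random_uuid() is the kernel's
     uuid helper, the surrounding names are invented for this example):

       /* Mounting a single-device fs whose fsid is already registered:
        * instead of failing the mount, use a one-off random fsid. */
       if (single_device && fsid_already_registered(fs_devices)) {
               generate_random_uuid(fs_devices->fsid);
               fs_devices->temp_fsid = true;   /* invented flag name */
       }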

  Other user visible changes:

   - filesystems with partially finished metadata_uuid conversion cannot
     be mounted anymore and the uuid fixup has to be done by btrfs-progs
     (btrfstune).

  Performance improvements:

   - reduce reservations for checksum deletions (by a factor of 4 with
     the free space tree enabled); on a sample workload on a file with
     many extents, the deletion time decreased by 12%

   - make extent state merges more efficient during insertions, reducing
     rb-tree iterations (run time of critical functions reduced by 5%)

  Core changes:

   - the integrity check functionality has been removed; this was a
     debugging feature and its removal does not affect other integrity
     checks like checksums or the tree-checker

   - space reservation changes:

      - more efficient delayed ref reservations; this avoids building up
        too much work, or overusing and exhausting the global block
        reserve in some situations

      - move the delayed refs reservation to transaction start time;
        this prevents some ENOSPC corner cases related to exhaustion of
        the global reserve

      - improvements in reducing excessive reservations for block group
        items

      - adjust overcommit logic in near-full situations, accounting for
        one more chunk to eventually allocate a metadata chunk; this is
        mostly relevant for small filesystems (<10GiB)

   - single-device filesystems are scanned but not registered (except
     seed devices); this allows temp_fsid to work

   - qgroup iterations do not need GFP_ATOMIC allocations anymore

   - cleanups, refactoring, reduced data structure size, function
     parameter simplifications, error handling fixes"

* tag 'for-6.7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (156 commits)
  btrfs: open code timespec64 in struct btrfs_inode
  btrfs: remove redundant log root tree index assignment during log sync
  btrfs: remove redundant initialization of variable dirty in btrfs_update_time()
  btrfs: sysfs: show temp_fsid feature
  btrfs: disable the device add feature for temp-fsid
  btrfs: disable the seed feature for temp-fsid
  btrfs: update comment for temp-fsid, fsid, and metadata_uuid
  btrfs: remove pointless empty log context list check when syncing log
  btrfs: update comment for struct btrfs_inode::lock
  btrfs: remove pointless barrier from btrfs_sync_file()
  btrfs: add and use helpers for reading and writing last_trans_committed
  btrfs: add and use helpers for reading and writing fs_info->generation
  btrfs: add and use helpers for reading and writing log_transid
  btrfs: add and use helpers for reading and writing last_log_commit
  btrfs: support cloned-device mount capability
  btrfs: add helper function find_fsid_by_disk
  btrfs: stop reserving excessive space for block group item insertions
  btrfs: stop reserving excessive space for block group item updates
  btrfs: reorder btrfs_inode to fill gaps
  btrfs: open code btrfs_ordered_inode_tree in btrfs_inode
  ...
commit d5acbc60fa
Linus Torvalds, 2023-10-30 10:42:06 -10:00
83 changed files with 4037 additions and 5295 deletions


@ -48,27 +48,6 @@ config BTRFS_FS_POSIX_ACL
If you don't know what Access Control Lists are, say N
config BTRFS_FS_CHECK_INTEGRITY
bool "Btrfs with integrity check tool compiled in (DEPRECATED)"
depends on BTRFS_FS
help
This feature has been deprecated and will be removed in 6.7.
Adds code that examines all block write requests (including
writes of the super block). The goal is to verify that the
state of the filesystem on disk is always consistent, i.e.,
after a power-loss or kernel panic event the filesystem is
in a consistent state.
If the integrity check tool is included and activated in
the mount options, plenty of kernel memory is used, and
plenty of additional CPU cycles are spent. Enabling this
functionality is not intended for normal use.
In most cases, unless you are a btrfs developer who needs
to verify the integrity of (super)-block write requests
during the run of a regression test, say N
config BTRFS_FS_RUN_SANITY_TESTS
bool "Btrfs will run sanity tests upon loading"
depends on BTRFS_FS


@ -33,10 +33,9 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
block-rsv.o delalloc-space.o block-group.o discard.o reflink.o \
subpage.o tree-mod-log.o extent-io-tree.o fs.o messages.o bio.o \
lru_cache.o
lru_cache.o raid-stripe-tree.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
btrfs-$(CONFIG_BTRFS_FS_REF_VERIFY) += ref-verify.o
btrfs-$(CONFIG_BLK_DEV_ZONED) += zoned.o
btrfs-$(CONFIG_FS_VERITY) += verity.o


@ -4,6 +4,7 @@
#define BTRFS_ACCESSORS_H
#include <linux/stddef.h>
#include <asm/unaligned.h>
struct btrfs_map_token {
struct extent_buffer *eb;
@ -305,6 +306,14 @@ BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
BTRFS_SETGET_FUNCS(stripe_extent_encoding, struct btrfs_stripe_extent, encoding, 8);
BTRFS_SETGET_FUNCS(raid_stride_devid, struct btrfs_raid_stride, devid, 64);
BTRFS_SETGET_FUNCS(raid_stride_physical, struct btrfs_raid_stride, physical, 64);
BTRFS_SETGET_STACK_FUNCS(stack_stripe_extent_encoding,
struct btrfs_stripe_extent, encoding, 8);
BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_devid, struct btrfs_raid_stride, devid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_raid_stride_physical, struct btrfs_raid_stride, physical, 64);
/* struct btrfs_dev_extent */
BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, chunk_tree, 64);
BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
@ -349,6 +358,9 @@ BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, count, 3
BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, count, 32);
BTRFS_SETGET_FUNCS(extent_owner_ref_root_id, struct btrfs_extent_owner_ref,
root_id, 64);
BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref,
type, 8);
BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref,
@ -365,6 +377,8 @@ static inline u32 btrfs_extent_inline_ref_size(int type)
if (type == BTRFS_EXTENT_DATA_REF_KEY)
return sizeof(struct btrfs_extent_data_ref) +
offsetof(struct btrfs_extent_inline_ref, offset);
if (type == BTRFS_EXTENT_OWNER_REF_KEY)
return sizeof(struct btrfs_extent_inline_ref);
return 0;
}
@ -966,6 +980,8 @@ BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item,
flags, 64);
BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item,
rescan, 64);
BTRFS_SETGET_FUNCS(qgroup_status_enable_gen, struct btrfs_qgroup_status_item,
enable_gen, 64);
/* btrfs_qgroup_info_item */
BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item,
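
The BTRFS_SETGET_FUNCS() lines above generate typed accessors for
on-disk structure members. A simplified sketch of the shape of one
generated getter (illustrative only; the real macro also emits the
setter, supports map tokens and handles unaligned access):

  static inline u64 btrfs_raid_stride_devid(const struct extent_buffer *eb,
                                            struct btrfs_raid_stride *s)
  {
          __le64 v;

          /* 's' encodes an offset into the extent buffer, not a plain pointer */
          read_extent_buffer(eb, &v, (unsigned long)&s->devid, sizeof(v));
          return le64_to_cpu(v);
  }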


@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <trace/events/btrfs.h>
#include "async-thread.h"
#include "ctree.h"
@ -242,7 +243,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
break;
trace_btrfs_ordered_sched(work);
spin_unlock_irqrestore(lock, flags);
work->ordered_func(work);
work->ordered_func(work, false);
/* now take the lock again and drop our item from the list */
spin_lock_irqsave(lock, flags);
@ -277,7 +278,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
* We don't want to call the ordered free functions with
* the lock held.
*/
work->ordered_free(work);
work->ordered_func(work, true);
/* NB: work must not be dereferenced past this point. */
trace_btrfs_all_work_done(wq->fs_info, work);
}
@ -285,7 +286,7 @@ static void run_ordered_work(struct btrfs_workqueue *wq,
spin_unlock_irqrestore(lock, flags);
if (free_self) {
self->ordered_free(self);
self->ordered_func(self, true);
/* NB: self must not be dereferenced past this point. */
trace_btrfs_all_work_done(wq->fs_info, self);
}
@ -300,7 +301,7 @@ static void btrfs_work_helper(struct work_struct *normal_work)
/*
* We should not touch things inside work in the following cases:
* 1) after work->func() if it has no ordered_free
* 1) after work->func() if it has no ordered_func(..., true) to free
* Since the struct is freed in work->func().
* 2) after setting WORK_DONE_BIT
* The work may be freed in other threads almost instantly.
@ -329,11 +330,10 @@ static void btrfs_work_helper(struct work_struct *normal_work)
}
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
btrfs_func_t ordered_func, btrfs_func_t ordered_free)
btrfs_ordered_func_t ordered_func)
{
work->func = func;
work->ordered_func = ordered_func;
work->ordered_free = ordered_free;
INIT_WORK(&work->normal_work, btrfs_work_helper);
INIT_LIST_HEAD(&work->ordered_list);
work->flags = 0;


@ -13,11 +13,11 @@ struct btrfs_fs_info;
struct btrfs_workqueue;
struct btrfs_work;
typedef void (*btrfs_func_t)(struct btrfs_work *arg);
typedef void (*btrfs_ordered_func_t)(struct btrfs_work *arg, bool);
struct btrfs_work {
btrfs_func_t func;
btrfs_func_t ordered_func;
btrfs_func_t ordered_free;
btrfs_ordered_func_t ordered_func;
/* Don't touch things below */
struct work_struct normal_work;
@ -35,7 +35,7 @@ struct btrfs_workqueue *btrfs_alloc_ordered_workqueue(
struct btrfs_fs_info *fs_info, const char *name,
unsigned int flags);
void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
btrfs_func_t ordered_func, btrfs_func_t ordered_free);
btrfs_ordered_func_t ordered_func);
void btrfs_queue_work(struct btrfs_workqueue *wq,
struct btrfs_work *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
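
The former pair of callbacks (ordered_func plus ordered_free) is thus
folded into a single btrfs_ordered_func_t taking a bool. A minimal
sketch of a caller adapting to the new API (the example_* names are
invented for illustration):

  static void example_ordered_done(struct btrfs_work *work, bool do_free)
  {
          struct example_ctx *ctx = container_of(work, struct example_ctx, work);

          if (do_free) {          /* what ->ordered_free used to do */
                  kfree(ctx);
                  return;
          }
          /* what ->ordered_func used to do: ordered completion work */
  }

  btrfs_init_work(&ctx->work, example_start, example_ordered_done);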


@ -1129,6 +1129,9 @@ static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
count, sc, GFP_NOFS);
break;
}
case BTRFS_EXTENT_OWNER_REF_KEY:
ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
break;
default:
WARN_ON(1);
}
@ -2998,7 +3001,7 @@ int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
}
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
struct btrfs_backref_cache *cache, int is_reloc)
struct btrfs_backref_cache *cache, bool is_reloc)
{
int i;


@ -247,7 +247,7 @@ struct prelim_ref {
struct rb_node rbnode;
u64 root_id;
struct btrfs_key key_for_search;
int level;
u8 level;
int count;
struct extent_inode_elem *inode_list;
u64 parent;
@ -440,11 +440,11 @@ struct btrfs_backref_cache {
* Relocation backref cache requires more info for reloc root compared
* to generic backref cache.
*/
unsigned int is_reloc;
bool is_reloc;
};
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
struct btrfs_backref_cache *cache, int is_reloc);
struct btrfs_backref_cache *cache, bool is_reloc);
struct btrfs_backref_node *btrfs_backref_alloc_node(
struct btrfs_backref_cache *cache, u64 bytenr, int level);
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
@ -533,9 +533,9 @@ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);
static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
u64 bytenr, int errno)
u64 bytenr, int error)
{
btrfs_panic(fs_info, errno,
btrfs_panic(fs_info, error,
"Inconsistency in backref cache found at offset %llu",
bytenr);
}


@ -10,11 +10,11 @@
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "dev-replace.h"
#include "rcu-string.h"
#include "zoned.h"
#include "file-item.h"
#include "raid-stripe-tree.h"
static struct bio_set btrfs_bioset;
static struct bio_set btrfs_clone_bioset;
@ -416,6 +416,9 @@ static void btrfs_orig_write_end_io(struct bio *bio)
else
bio->bi_status = BLK_STS_OK;
if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
btrfs_orig_bbio_end_io(bbio);
btrfs_put_bioc(bioc);
}
@ -427,6 +430,8 @@ static void btrfs_clone_write_end_io(struct bio *bio)
if (bio->bi_status) {
atomic_inc(&stripe->bioc->error);
btrfs_log_dev_io_error(bio, stripe->dev);
} else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
}
/* Pass on control to the original bio this one was cloned from */
@ -463,8 +468,6 @@ static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
(unsigned long)dev->bdev->bd_dev, btrfs_dev_name(dev),
dev->devid, bio->bi_iter.bi_size);
btrfsic_check_bio(bio);
if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)
blkcg_punt_bio_submit(bio);
else
@ -490,6 +493,7 @@ static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr)
bio->bi_private = &bioc->stripes[dev_nr];
bio->bi_iter.bi_sector = bioc->stripes[dev_nr].physical >> SECTOR_SHIFT;
bioc->stripes[dev_nr].bioc = bioc;
bioc->size = bio->bi_iter.bi_size;
btrfs_submit_dev_bio(bioc->stripes[dev_nr].dev, bio);
}
@ -499,6 +503,8 @@ static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc,
if (!bioc) {
/* Single mirror read/write fast path. */
btrfs_bio(bio)->mirror_num = mirror_num;
if (bio_op(bio) != REQ_OP_READ)
btrfs_bio(bio)->orig_physical = smap->physical;
bio->bi_iter.bi_sector = smap->physical >> SECTOR_SHIFT;
if (bio_op(bio) != REQ_OP_READ)
btrfs_bio(bio)->orig_physical = smap->physical;
@ -568,13 +574,20 @@ static void run_one_async_start(struct btrfs_work *work)
*
* At IO completion time the csums attached on the ordered extent record are
* inserted into the tree.
*
* If called with @do_free == true, then it will free the work struct.
*/
static void run_one_async_done(struct btrfs_work *work)
static void run_one_async_done(struct btrfs_work *work, bool do_free)
{
struct async_submit_bio *async =
container_of(work, struct async_submit_bio, work);
struct bio *bio = &async->bbio->bio;
if (do_free) {
kfree(container_of(work, struct async_submit_bio, work));
return;
}
/* If an error occurred we just want to clean up the bio and move on. */
if (bio->bi_status) {
btrfs_orig_bbio_end_io(async->bbio);
@ -590,11 +603,6 @@ static void run_one_async_done(struct btrfs_work *work)
__btrfs_submit_bio(bio, async->bioc, &async->smap, async->mirror_num);
}
static void run_one_async_free(struct btrfs_work *work)
{
kfree(container_of(work, struct async_submit_bio, work));
}
static bool should_async_write(struct btrfs_bio *bbio)
{
/* Submit synchronously if the checksum implementation is fast. */
@ -636,8 +644,7 @@ static bool btrfs_wq_submit_bio(struct btrfs_bio *bbio,
async->smap = *smap;
async->mirror_num = mirror_num;
btrfs_init_work(&async->work, run_one_async_start, run_one_async_done,
run_one_async_free);
btrfs_init_work(&async->work, run_one_async_start, run_one_async_done);
btrfs_queue_work(fs_info->workers, &async->work);
return true;
}
@ -657,9 +664,11 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
blk_status_t ret;
int error;
smap.is_scrub = !bbio->inode;
btrfs_bio_counter_inc_blocked(fs_info);
error = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
&bioc, &smap, &mirror_num, 1);
&bioc, &smap, &mirror_num);
if (error) {
ret = errno_to_blk_status(error);
goto fail;
@ -691,6 +700,18 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
bio->bi_opf |= REQ_OP_ZONE_APPEND;
}
if (is_data_bbio(bbio) && bioc &&
btrfs_need_stripe_tree_update(bioc->fs_info, bioc->map_type)) {
/*
* No locking for the list update, as we only add to
* the list in the I/O submission path, and list
* iteration only happens in the completion path, which
* can't happen until after the last submission.
*/
btrfs_get_bioc(bioc);
list_add_tail(&bioc->rst_ordered_entry, &bbio->ordered->bioc_list);
}
/*
* Csum items for reloc roots have already been cloned at this
* point, so they are handled as part of the no-checksum case.
@ -779,8 +800,6 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
bio_init(&bio, smap.dev->bdev, &bvec, 1, REQ_OP_WRITE | REQ_SYNC);
bio.bi_iter.bi_sector = smap.physical >> SECTOR_SHIFT;
__bio_add_page(&bio, page, length, pg_offset);
btrfsic_check_bio(&bio);
ret = submit_bio_wait(&bio);
if (ret) {
/* try to remap that extent elsewhere? */
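
A note on the zone append handling above: for REQ_OP_ZONE_APPEND the
device chooses the write position, so the end_io handlers read it back
from the completed bio, e.g.:

  stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT; /* sectors to bytes */

and these completed physical addresses are what the raid-stripe-tree
entries are later built from.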


@ -935,7 +935,7 @@ int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
caching_ctl->block_group = cache;
refcount_set(&caching_ctl->count, 2);
atomic_set(&caching_ctl->progress, 0);
btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
btrfs_init_work(&caching_ctl->work, caching_thread, NULL);
spin_lock(&cache->lock);
if (cache->cached != BTRFS_CACHE_NO) {
@ -1286,7 +1286,7 @@ out:
/* Once for the lookup reference */
btrfs_put_block_group(block_group);
if (remove_rsv)
btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
btrfs_free_path(path);
return ret;
}
@ -2601,7 +2601,7 @@ static int insert_dev_extent(struct btrfs_trans_handle *trans,
btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@ -2709,7 +2709,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
/* Already aborted the transaction if it failed. */
next:
btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
list_del_init(&block_group->bg_list);
clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
}
@ -2819,8 +2819,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
#endif
list_add_tail(&cache->bg_list, &trans->new_bgs);
trans->delayed_ref_updates++;
btrfs_update_delayed_refs_rsv(trans);
btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info);
set_avail_alloc_bits(fs_info, type);
return cache;
@ -3025,7 +3024,7 @@ static int update_block_group_item(struct btrfs_trans_handle *trans,
cache->global_root_id);
btrfs_set_stack_block_group_flags(&bgi, cache->flags);
write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
fail:
btrfs_release_path(path);
/*
@ -3051,7 +3050,6 @@ static int cache_save_setup(struct btrfs_block_group *block_group,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_root *root = fs_info->tree_root;
struct inode *inode = NULL;
struct extent_changeset *data_reserved = NULL;
u64 alloc_hint = 0;
@ -3103,7 +3101,7 @@ again:
* time.
*/
BTRFS_I(inode)->generation = 0;
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret) {
/*
* So theoretically we could recover from this, simply set the
@ -3370,7 +3368,7 @@ again:
if (should_put)
btrfs_put_block_group(cache);
if (drop_reserve)
btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
/*
* Avoid blocking other tasks for too long. It might even save
* us from writing caches for block groups that are going to be
@ -3474,8 +3472,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
cache_save_setup(cache, trans, path);
if (!ret)
ret = btrfs_run_delayed_refs(trans,
(unsigned long) -1);
ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
cache->io_ctl.inode = NULL;
@ -3518,7 +3515,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
/* If its not on the io list, we need to put the block group */
if (should_put)
btrfs_put_block_group(cache);
btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
@ -3543,12 +3540,12 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, bool alloc)
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_block_group *cache = NULL;
u64 total = num_bytes;
struct btrfs_space_info *space_info;
struct btrfs_block_group *cache;
u64 old_val;
u64 byte_in_group;
bool reclaim = false;
bool bg_already_dirty = true;
int factor;
int ret = 0;
/* Block accounting for super block */
spin_lock(&info->delalloc_root_lock);
@ -3560,97 +3557,86 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
btrfs_set_super_bytes_used(info->super_copy, old_val);
spin_unlock(&info->delalloc_root_lock);
while (total) {
struct btrfs_space_info *space_info;
bool reclaim = false;
cache = btrfs_lookup_block_group(info, bytenr);
if (!cache)
return -ENOENT;
cache = btrfs_lookup_block_group(info, bytenr);
if (!cache) {
ret = -ENOENT;
break;
}
space_info = cache->space_info;
factor = btrfs_bg_type_to_factor(cache->flags);
/* An extent can not span multiple block groups. */
ASSERT(bytenr + num_bytes <= cache->start + cache->length);
/*
* If this block group has free space cache written out, we
* need to make sure to load it if we are removing space. This
* is because we need the unpinning stage to actually add the
* space back to the block group, otherwise we will leak space.
*/
if (!alloc && !btrfs_block_group_done(cache))
btrfs_cache_block_group(cache, true);
space_info = cache->space_info;
factor = btrfs_bg_type_to_factor(cache->flags);
byte_in_group = bytenr - cache->start;
WARN_ON(byte_in_group > cache->length);
/*
* If this block group has free space cache written out, we need to make
* sure to load it if we are removing space. This is because we need
* the unpinning stage to actually add the space back to the block group,
* otherwise we will leak space.
*/
if (!alloc && !btrfs_block_group_done(cache))
btrfs_cache_block_group(cache, true);
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
if (btrfs_test_opt(info, SPACE_CACHE) &&
cache->disk_cache_state < BTRFS_DC_CLEAR)
cache->disk_cache_state = BTRFS_DC_CLEAR;
if (btrfs_test_opt(info, SPACE_CACHE) &&
cache->disk_cache_state < BTRFS_DC_CLEAR)
cache->disk_cache_state = BTRFS_DC_CLEAR;
old_val = cache->used;
num_bytes = min(total, cache->length - byte_in_group);
if (alloc) {
old_val += num_bytes;
cache->used = old_val;
cache->reserved -= num_bytes;
space_info->bytes_reserved -= num_bytes;
space_info->bytes_used += num_bytes;
space_info->disk_used += num_bytes * factor;
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
} else {
old_val -= num_bytes;
cache->used = old_val;
cache->pinned += num_bytes;
btrfs_space_info_update_bytes_pinned(info, space_info,
num_bytes);
space_info->bytes_used -= num_bytes;
space_info->disk_used -= num_bytes * factor;
old_val = cache->used;
if (alloc) {
old_val += num_bytes;
cache->used = old_val;
cache->reserved -= num_bytes;
space_info->bytes_reserved -= num_bytes;
space_info->bytes_used += num_bytes;
space_info->disk_used += num_bytes * factor;
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
} else {
old_val -= num_bytes;
cache->used = old_val;
cache->pinned += num_bytes;
btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
space_info->bytes_used -= num_bytes;
space_info->disk_used -= num_bytes * factor;
reclaim = should_reclaim_block_group(cache, num_bytes);
reclaim = should_reclaim_block_group(cache, num_bytes);
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
spin_unlock(&cache->lock);
spin_unlock(&space_info->lock);
set_extent_bit(&trans->transaction->pinned_extents,
bytenr, bytenr + num_bytes - 1,
EXTENT_DIRTY, NULL);
}
spin_lock(&trans->transaction->dirty_bgs_lock);
if (list_empty(&cache->dirty_list)) {
list_add_tail(&cache->dirty_list,
&trans->transaction->dirty_bgs);
trans->delayed_ref_updates++;
btrfs_get_block_group(cache);
}
spin_unlock(&trans->transaction->dirty_bgs_lock);
/*
* No longer have used bytes in this block group, queue it for
* deletion. We do this after adding the block group to the
* dirty list to avoid races between cleaner kthread and space
* cache writeout.
*/
if (!alloc && old_val == 0) {
if (!btrfs_test_opt(info, DISCARD_ASYNC))
btrfs_mark_bg_unused(cache);
} else if (!alloc && reclaim) {
btrfs_mark_bg_to_reclaim(cache);
}
btrfs_put_block_group(cache);
total -= num_bytes;
bytenr += num_bytes;
set_extent_bit(&trans->transaction->pinned_extents, bytenr,
bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
}
spin_lock(&trans->transaction->dirty_bgs_lock);
if (list_empty(&cache->dirty_list)) {
list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
bg_already_dirty = false;
btrfs_get_block_group(cache);
}
spin_unlock(&trans->transaction->dirty_bgs_lock);
/*
* No longer have used bytes in this block group, queue it for deletion.
* We do this after adding the block group to the dirty list to avoid
* races between cleaner kthread and space cache writeout.
*/
if (!alloc && old_val == 0) {
if (!btrfs_test_opt(info, DISCARD_ASYNC))
btrfs_mark_bg_unused(cache);
} else if (!alloc && reclaim) {
btrfs_mark_bg_to_reclaim(cache);
}
btrfs_put_block_group(cache);
/* Modified block groups are accounted for in the delayed_refs_rsv. */
btrfs_update_delayed_refs_rsv(trans);
return ret;
if (!bg_already_dirty)
btrfs_inc_delayed_refs_rsv_bg_updates(info);
return 0;
}
/*


@ -221,7 +221,8 @@ int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
if (num_bytes == 0)
return 0;
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
num_bytes, flush);
if (!ret)
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);
@ -261,7 +262,8 @@ int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
if (!ret)
return 0;
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
num_bytes, flush);
if (!ret) {
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
return 0;
@ -279,10 +281,10 @@ u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *target = NULL;
/*
* If we are the delayed_rsv then push to the global rsv, otherwise dump
* into the delayed rsv if it is not full.
* If we are a delayed block reserve then push to the global rsv,
* otherwise dump into the global delayed reserve if it is not full.
*/
if (block_rsv == delayed_rsv)
if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS)
target = global_rsv;
else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
target = delayed_rsv;
@ -354,6 +356,11 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
min_items++;
}
if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item);
min_items++;
}
/*
* But we also want to reserve enough space so we can do the fallback
* global reserve for an unlink, which is an additional
@ -405,6 +412,7 @@ void btrfs_init_root_block_rsv(struct btrfs_root *root)
case BTRFS_EXTENT_TREE_OBJECTID:
case BTRFS_FREE_SPACE_TREE_OBJECTID:
case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
case BTRFS_RAID_STRIPE_TREE_OBJECTID:
root->block_rsv = &fs_info->delayed_refs_rsv;
break;
case BTRFS_ROOT_TREE_OBJECTID:
@ -517,8 +525,8 @@ again:
block_rsv->type, ret);
}
try_reserve:
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
BTRFS_RESERVE_NO_FLUSH);
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
blocksize, BTRFS_RESERVE_NO_FLUSH);
if (!ret)
return block_rsv;
/*
@ -539,7 +547,7 @@ try_reserve:
* one last time to force a reservation if there's enough actual space
* on disk to make the reservation.
*/
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize,
BTRFS_RESERVE_FLUSH_EMERGENCY);
if (!ret)
return block_rsv;


@ -8,6 +8,8 @@
#include <linux/hash.h>
#include <linux/refcount.h>
#include <linux/fscrypt.h>
#include <trace/events/btrfs.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
@ -79,11 +81,21 @@ struct btrfs_inode {
*/
struct btrfs_key location;
/* Cached value of inode property 'compression'. */
u8 prop_compress;
/*
* Force compression on the file using the defrag ioctl, could be
* different from prop_compress and takes precedence if set.
*/
u8 defrag_compress;
/*
* Lock for counters and all fields used to determine if the inode is in
* the log or not (last_trans, last_sub_trans, last_log_commit,
* logged_trans), to access/update new_delalloc_bytes and to update the
* VFS' inode number of bytes used.
* logged_trans), to access/update delalloc_bytes, new_delalloc_bytes,
* defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to
* update the VFS' inode number of bytes used.
*/
spinlock_t lock;
@ -102,8 +114,18 @@ struct btrfs_inode {
/* held while logging the inode in tree-log.c */
struct mutex log_mutex;
/*
* Counters to keep track of the number of extent item's we may use due
* to delalloc and such. outstanding_extents is the number of extent
* items we think we'll end up using, and reserved_extents is the number
* of extent items we've reserved metadata for. Protected by 'lock'.
*/
unsigned outstanding_extents;
/* used to order data wrt metadata */
struct btrfs_ordered_inode_tree ordered_tree;
spinlock_t ordered_tree_lock;
struct rb_root ordered_tree;
struct rb_node *ordered_tree_last;
/* list of all the delalloc inodes in the FS. There are times we need
* to write all the delalloc pages to disk, and this list is used
@ -122,28 +144,31 @@ struct btrfs_inode {
u64 generation;
/*
* transid of the trans_handle that last modified this inode
* ID of the transaction handle that last modified this inode.
* Protected by 'lock'.
*/
u64 last_trans;
/*
* transid that last logged this inode
* ID of the transaction that last logged this inode.
* Protected by 'lock'.
*/
u64 logged_trans;
/*
* log transid when this inode was last modified
* Log transaction ID when this inode was last modified.
* Protected by 'lock'.
*/
int last_sub_trans;
/* a local copy of root's last_log_commit */
/* A local copy of root's last_log_commit. Protected by 'lock'. */
int last_log_commit;
union {
/*
* Total number of bytes pending delalloc, used by stat to
* calculate the real block usage of the file. This is used
* only for files.
* only for files. Protected by 'lock'.
*/
u64 delalloc_bytes;
/*
@ -161,7 +186,7 @@ struct btrfs_inode {
* Total number of bytes pending delalloc that fall within a file
* range that is either a hole or beyond EOF (and no prealloc extent
* exists in the range). This is always <= delalloc_bytes and this
* is used only for files.
* is used only for files. Protected by 'lock'.
*/
u64 new_delalloc_bytes;
/*
@ -172,15 +197,15 @@ struct btrfs_inode {
};
/*
* total number of bytes pending defrag, used by stat to check whether
* it needs COW.
* Total number of bytes pending defrag, used by stat to check whether
* it needs COW. Protected by 'lock'.
*/
u64 defrag_bytes;
/*
* the size of the file stored in the metadata on disk. data=ordered
* The size of the file stored in the metadata on disk. data=ordered
* means the in-memory i_size might be larger than the size on disk
* because not all the blocks are written yet.
* because not all the blocks are written yet. Protected by 'lock'.
*/
u64 disk_i_size;
@ -214,7 +239,7 @@ struct btrfs_inode {
/*
* Number of bytes outstanding that are going to need csums. This is
* used in ENOSPC accounting.
* used in ENOSPC accounting. Protected by 'lock'.
*/
u64 csum_bytes;
@ -223,30 +248,13 @@ struct btrfs_inode {
/* Read-only compatibility flags, upper half of inode_item::flags */
u32 ro_flags;
/*
* Counters to keep track of the number of extent item's we may use due
* to delalloc and such. outstanding_extents is the number of extent
* items we think we'll end up using, and reserved_extents is the number
* of extent items we've reserved metadata for.
*/
unsigned outstanding_extents;
struct btrfs_block_rsv block_rsv;
/*
* Cached values of inode properties
*/
unsigned prop_compress; /* per-file compression algorithm */
/*
* Force compression on the file using the defrag ioctl, could be
* different from prop_compress and takes precedence if set
*/
unsigned defrag_compress;
struct btrfs_delayed_node *delayed_node;
/* File creation time. */
struct timespec64 i_otime;
u64 i_otime_sec;
u32 i_otime_nsec;
/* Hook into fs_info->delayed_iputs */
struct list_head delayed_iput;
@ -387,7 +395,7 @@ static inline bool btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
spin_lock(&inode->lock);
if (inode->logged_trans == generation &&
inode->last_sub_trans <= inode->last_log_commit &&
inode->last_sub_trans <= inode->root->last_log_commit)
inode->last_sub_trans <= btrfs_get_root_last_log_commit(inode->root))
ret = true;
spin_unlock(&inode->lock);
return ret;
@ -481,9 +489,9 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset,
u64 start, u64 end);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_inode *inode);
struct btrfs_inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_inode *inode);
struct btrfs_inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct btrfs_inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
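
With struct timespec64 i_otime open coded as i_otime_sec/i_otime_nsec
(packing struct btrfs_inode tighter), converting back for VFS use is a
two-field copy; a sketch of such a helper (the name is invented for
illustration):

  static inline struct timespec64 btrfs_inode_otime(const struct btrfs_inode *bi)
  {
          return (struct timespec64) {
                  .tv_sec  = bi->i_otime_sec,
                  .tv_nsec = bi->i_otime_nsec,
          };
  }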

(file diff suppressed because it is too large)


@ -1,20 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) STRATO AG 2011. All rights reserved.
*/
#ifndef BTRFS_CHECK_INTEGRITY_H
#define BTRFS_CHECK_INTEGRITY_H
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
void btrfsic_check_bio(struct bio *bio);
#else
static inline void btrfsic_check_bio(struct bio *bio) { }
#endif
int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask);
void btrfsic_unmount(struct btrfs_fs_devices *fs_devices);
#endif


@ -193,12 +193,12 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb)
unsigned long index = cb->start >> PAGE_SHIFT;
unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
struct folio_batch fbatch;
const int errno = blk_status_to_errno(cb->bbio.bio.bi_status);
const int error = blk_status_to_errno(cb->bbio.bio.bi_status);
int i;
int ret;
if (errno)
mapping_set_error(inode->i_mapping, errno);
if (error)
mapping_set_error(inode->i_mapping, error);
folio_batch_init(&fbatch);
while (index <= end_index) {


@ -230,9 +230,9 @@ noinline void btrfs_release_path(struct btrfs_path *p)
* cause could be a bug, eg. due to ENOSPC, and not for common errors that are
* caused by external factors.
*/
bool __cold abort_should_print_stack(int errno)
bool __cold abort_should_print_stack(int error)
{
switch (errno) {
switch (error) {
case -EIO:
case -EROFS:
case -ENOMEM:
@ -316,6 +316,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
int ret = 0;
int level;
struct btrfs_disk_key disk_key;
u64 reloc_src_root = 0;
WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != fs_info->running_transaction->transid);
@ -328,9 +329,11 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
reloc_src_root = btrfs_header_owner(buf);
cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
&disk_key, level, buf->start, 0,
BTRFS_NESTING_NEW_ROOT);
reloc_src_root, BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(cow))
return PTR_ERR(cow);
@ -359,7 +362,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
return ret;
}
btrfs_mark_buffer_dirty(cow);
btrfs_mark_buffer_dirty(trans, cow);
*cow_ret = cow;
return 0;
}
@ -518,13 +521,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
* bytes the allocator should try to find free next to the block it returns.
* This is just a hint and may be ignored by the allocator.
*/
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
u64 search_start, u64 empty_size,
enum btrfs_lock_nesting nest)
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
u64 search_start, u64 empty_size,
enum btrfs_lock_nesting nest)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_disk_key disk_key;
@ -533,6 +536,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
int last_ref = 0;
int unlock_orig = 0;
u64 parent_start = 0;
u64 reloc_src_root = 0;
if (*cow_ret == buf)
unlock_orig = 1;
@ -551,12 +555,14 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
btrfs_node_key(buf, &disk_key, 0);
if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
parent_start = parent->start;
if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
if (parent)
parent_start = parent->start;
reloc_src_root = btrfs_header_owner(buf);
}
cow = btrfs_alloc_tree_block(trans, root, parent_start,
root->root_key.objectid, &disk_key, level,
search_start, empty_size, nest);
search_start, empty_size, reloc_src_root, nest);
if (IS_ERR(cow))
return PTR_ERR(cow);
@ -627,7 +633,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
cow->start);
btrfs_set_node_ptr_generation(parent, parent_slot,
trans->transid);
btrfs_mark_buffer_dirty(parent);
btrfs_mark_buffer_dirty(trans, parent);
if (last_ref) {
ret = btrfs_tree_mod_log_free_eb(buf);
if (ret) {
@ -643,7 +649,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (unlock_orig)
btrfs_tree_unlock(buf);
free_extent_buffer_stale(buf);
btrfs_mark_buffer_dirty(cow);
btrfs_mark_buffer_dirty(trans, cow);
*cow_ret = cow;
return 0;
}
@ -679,11 +685,11 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
}
/*
* cows a single block, see __btrfs_cow_block for the real work.
* COWs a single block, see btrfs_force_cow_block() for the real work.
* This version of it has extra checks so that a block isn't COWed more than
* once per transaction, as long as it hasn't been written yet
*/
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
@ -723,7 +729,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
return 0;
}
search_start = buf->start & ~((u64)SZ_1G - 1);
search_start = round_down(buf->start, SZ_1G);
/*
* Before CoWing this block for later modification, check if it's
@ -732,8 +738,8 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
* Also We don't care about the error, as it's handled internally.
*/
btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
ret = __btrfs_cow_block(trans, root, buf, parent,
parent_slot, cow_ret, search_start, 0, nest);
ret = btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
cow_ret, search_start, 0, nest);
trace_btrfs_cow_block(root, buf, *cow_ret);
@ -741,49 +747,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
}
ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
/*
* helper function for defrag to decide if two blocks pointed to by a
* node are actually close by
*/
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
if (blocknr < other && other - (blocknr + blocksize) < 32768)
return 1;
if (blocknr > other && blocknr - (other + blocksize) < 32768)
return 1;
return 0;
}
#ifdef __LITTLE_ENDIAN
/*
* Compare two keys, on little-endian the disk order is same as CPU order and
* we can avoid the conversion.
*/
static int comp_keys(const struct btrfs_disk_key *disk_key,
const struct btrfs_key *k2)
{
const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
return btrfs_comp_cpu_keys(k1, k2);
}
#else
/*
* compare two keys in a memcmp fashion
*/
static int comp_keys(const struct btrfs_disk_key *disk,
const struct btrfs_key *k2)
{
struct btrfs_key k1;
btrfs_disk_key_to_cpu(&k1, disk);
return btrfs_comp_cpu_keys(&k1, k2);
}
#endif
/*
* same as comp_keys only with two btrfs_key's
*/
@ -804,105 +767,6 @@ int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_ke
return 0;
}
/*
* this is used by the defrag code to go through all the
* leaves pointed to by a node and reallocate them so that
* disk order is close to key order
*/
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
int start_slot, u64 *last_ret,
struct btrfs_key *progress)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *cur;
u64 blocknr;
u64 search_start = *last_ret;
u64 last_block = 0;
u64 other;
u32 parent_nritems;
int end_slot;
int i;
int err = 0;
u32 blocksize;
int progress_passed = 0;
struct btrfs_disk_key disk_key;
/*
* COWing must happen through a running transaction, which always
* matches the current fs generation (it's a transaction with a state
* less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
* into error state to prevent the commit of any transaction.
*/
if (unlikely(trans->transaction != fs_info->running_transaction ||
trans->transid != fs_info->generation)) {
btrfs_abort_transaction(trans, -EUCLEAN);
btrfs_crit(fs_info,
"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
parent->start, btrfs_root_id(root), trans->transid,
fs_info->running_transaction->transid,
fs_info->generation);
return -EUCLEAN;
}
parent_nritems = btrfs_header_nritems(parent);
blocksize = fs_info->nodesize;
end_slot = parent_nritems - 1;
if (parent_nritems <= 1)
return 0;
for (i = start_slot; i <= end_slot; i++) {
int close = 1;
btrfs_node_key(parent, &disk_key, i);
if (!progress_passed && comp_keys(&disk_key, progress) < 0)
continue;
progress_passed = 1;
blocknr = btrfs_node_blockptr(parent, i);
if (last_block == 0)
last_block = blocknr;
if (i > 0) {
other = btrfs_node_blockptr(parent, i - 1);
close = close_blocks(blocknr, other, blocksize);
}
if (!close && i < end_slot) {
other = btrfs_node_blockptr(parent, i + 1);
close = close_blocks(blocknr, other, blocksize);
}
if (close) {
last_block = blocknr;
continue;
}
cur = btrfs_read_node_slot(parent, i);
if (IS_ERR(cur))
return PTR_ERR(cur);
if (search_start == 0)
search_start = last_block;
btrfs_tree_lock(cur);
err = __btrfs_cow_block(trans, root, cur, parent, i,
&cur, search_start,
min(16 * blocksize,
(end_slot - i) * blocksize),
BTRFS_NESTING_COW);
if (err) {
btrfs_tree_unlock(cur);
free_extent_buffer(cur);
break;
}
search_start = cur->start;
last_block = cur->start;
*last_ret = search_start;
btrfs_tree_unlock(cur);
free_extent_buffer(cur);
}
return err;
}
/*
* Search for a key in the given extent_buffer.
*
@ -969,7 +833,7 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
tmp = &unaligned;
}
ret = comp_keys(tmp, key);
ret = btrfs_comp_keys(tmp, key);
if (ret < 0)
low = mid + 1;
@ -984,19 +848,19 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
return 1;
}
static void root_add_used(struct btrfs_root *root, u32 size)
static void root_add_used_bytes(struct btrfs_root *root)
{
spin_lock(&root->accounting_lock);
btrfs_set_root_used(&root->root_item,
btrfs_root_used(&root->root_item) + size);
btrfs_root_used(&root->root_item) + root->fs_info->nodesize);
spin_unlock(&root->accounting_lock);
}
static void root_sub_used(struct btrfs_root *root, u32 size)
static void root_sub_used_bytes(struct btrfs_root *root)
{
spin_lock(&root->accounting_lock);
btrfs_set_root_used(&root->root_item,
btrfs_root_used(&root->root_item) - size);
btrfs_root_used(&root->root_item) - root->fs_info->nodesize);
spin_unlock(&root->accounting_lock);
}
@ -1112,7 +976,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* once for the path */
free_extent_buffer(mid);
root_sub_used(root, mid->len);
root_sub_used_bytes(root);
btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
/* once for the root ptr */
free_extent_buffer_stale(mid);
@ -1182,7 +1046,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
right = NULL;
goto out;
}
root_sub_used(root, right->len);
root_sub_used_bytes(root);
btrfs_free_tree_block(trans, btrfs_root_id(root), right,
0, 1);
free_extent_buffer_stale(right);
@ -1197,7 +1061,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto out;
}
btrfs_set_node_key(parent, &right_key, pslot + 1);
btrfs_mark_buffer_dirty(parent);
btrfs_mark_buffer_dirty(trans, parent);
}
}
if (btrfs_header_nritems(mid) == 1) {
@ -1240,7 +1104,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
mid = NULL;
goto out;
}
root_sub_used(root, mid->len);
root_sub_used_bytes(root);
btrfs_free_tree_block(trans, btrfs_root_id(root), mid, 0, 1);
free_extent_buffer_stale(mid);
mid = NULL;
@ -1255,7 +1119,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto out;
}
btrfs_set_node_key(parent, &mid_key, pslot);
btrfs_mark_buffer_dirty(parent);
btrfs_mark_buffer_dirty(trans, parent);
}
/* update the path */
@ -1362,7 +1226,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
return ret;
}
btrfs_set_node_key(parent, &disk_key, pslot);
btrfs_mark_buffer_dirty(parent);
btrfs_mark_buffer_dirty(trans, parent);
if (btrfs_header_nritems(left) > orig_slot) {
path->nodes[level] = left;
path->slots[level + 1] -= 1;
@ -1422,7 +1286,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
return ret;
}
btrfs_set_node_key(parent, &disk_key, pslot + 1);
btrfs_mark_buffer_dirty(parent);
btrfs_mark_buffer_dirty(trans, parent);
if (btrfs_header_nritems(mid) <= orig_slot) {
path->nodes[level] = right;
@ -2006,7 +1870,7 @@ static int search_leaf(struct btrfs_trans_handle *trans,
* the extent buffer's header and we have recently accessed
* the header's level field.
*/
ret = comp_keys(&first_key, key);
ret = btrfs_comp_keys(&first_key, key);
if (ret < 0) {
/*
* The first key is smaller than the key we want
@ -2091,8 +1955,8 @@ static int search_leaf(struct btrfs_trans_handle *trans,
}
/*
* btrfs_search_slot - look for a key in a tree and perform necessary
* modifications to preserve tree invariants.
* Look for a key in a tree and perform necessary modifications to preserve
* tree invariants.
*
* @trans: Handle of transaction, used when modifying the tree
* @p: Holds all btree nodes along the search path
@ -2515,7 +2379,7 @@ static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
*/
if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
btrfs_item_key(path->nodes[0], &found_key, path->slots[0]);
ret = comp_keys(&found_key, &orig_key);
ret = btrfs_comp_keys(&found_key, &orig_key);
if (ret == 0) {
if (path->slots[0] > 0) {
path->slots[0]--;
@ -2530,7 +2394,7 @@ static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
}
btrfs_item_key(path->nodes[0], &found_key, 0);
ret = comp_keys(&found_key, &key);
ret = btrfs_comp_keys(&found_key, &key);
/*
* We might have had an item with the previous key in the tree right
* before we released our path. And after we released our path, that
@ -2678,7 +2542,8 @@ int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
* higher levels
*
*/
static void fixup_low_keys(struct btrfs_path *path,
static void fixup_low_keys(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_disk_key *key, int level)
{
int i;
@ -2695,7 +2560,7 @@ static void fixup_low_keys(struct btrfs_path *path,
BTRFS_MOD_LOG_KEY_REPLACE);
BUG_ON(ret < 0);
btrfs_set_node_key(t, key, tslot);
btrfs_mark_buffer_dirty(path->nodes[i]);
btrfs_mark_buffer_dirty(trans, path->nodes[i]);
if (tslot != 0)
break;
}
@ -2707,10 +2572,11 @@ static void fixup_low_keys(struct btrfs_path *path,
* This function isn't completely safe. It's the caller's responsibility
* that the new key won't break the order
*/
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
const struct btrfs_key *new_key)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_disk_key disk_key;
struct extent_buffer *eb;
int slot;
@ -2719,7 +2585,7 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
slot = path->slots[0];
if (slot > 0) {
btrfs_item_key(eb, &disk_key, slot - 1);
if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
if (unlikely(btrfs_comp_keys(&disk_key, new_key) >= 0)) {
btrfs_print_leaf(eb);
btrfs_crit(fs_info,
"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
@ -2733,7 +2599,7 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
}
if (slot < btrfs_header_nritems(eb) - 1) {
btrfs_item_key(eb, &disk_key, slot + 1);
if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
if (unlikely(btrfs_comp_keys(&disk_key, new_key) <= 0)) {
btrfs_print_leaf(eb);
btrfs_crit(fs_info,
"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
@ -2748,9 +2614,9 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
btrfs_cpu_key_to_disk(&disk_key, new_key);
btrfs_set_item_key(eb, &disk_key, slot);
btrfs_mark_buffer_dirty(eb);
btrfs_mark_buffer_dirty(trans, eb);
if (slot == 0)
fixup_low_keys(path, &disk_key, 1);
fixup_low_keys(trans, path, &disk_key, 1);
}
/*
@ -2881,8 +2747,8 @@ static int push_node_left(struct btrfs_trans_handle *trans,
}
btrfs_set_header_nritems(src, src_nritems - push_items);
btrfs_set_header_nritems(dst, dst_nritems + push_items);
btrfs_mark_buffer_dirty(src);
btrfs_mark_buffer_dirty(dst);
btrfs_mark_buffer_dirty(trans, src);
btrfs_mark_buffer_dirty(trans, dst);
return ret;
}
@ -2957,8 +2823,8 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(src, src_nritems - push_items);
btrfs_set_header_nritems(dst, dst_nritems + push_items);
btrfs_mark_buffer_dirty(src);
btrfs_mark_buffer_dirty(dst);
btrfs_mark_buffer_dirty(trans, src);
btrfs_mark_buffer_dirty(trans, dst);
return ret;
}
@ -2974,7 +2840,6 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 lower_gen;
struct extent_buffer *lower;
struct extent_buffer *c;
@ -2993,11 +2858,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
&lower_key, level, root->node->start, 0,
BTRFS_NESTING_NEW_ROOT);
0, BTRFS_NESTING_NEW_ROOT);
if (IS_ERR(c))
return PTR_ERR(c);
root_add_used(root, fs_info->nodesize);
root_add_used_bytes(root);
btrfs_set_header_nritems(c, 1);
btrfs_set_node_key(c, &lower_key, 0);
@ -3007,7 +2872,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
btrfs_set_node_ptr_generation(c, 0, lower_gen);
btrfs_mark_buffer_dirty(c);
btrfs_mark_buffer_dirty(trans, c);
old = root->node;
ret = btrfs_tree_mod_log_insert_root(root->node, c, false);
@ -3079,7 +2944,7 @@ static int insert_ptr(struct btrfs_trans_handle *trans,
WARN_ON(trans->transid == 0);
btrfs_set_node_ptr_generation(lower, slot, trans->transid);
btrfs_set_header_nritems(lower, nritems + 1);
btrfs_mark_buffer_dirty(lower);
btrfs_mark_buffer_dirty(trans, lower);
return 0;
}
@ -3137,11 +3002,11 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
&disk_key, level, c->start, 0,
BTRFS_NESTING_SPLIT);
0, BTRFS_NESTING_SPLIT);
if (IS_ERR(split))
return PTR_ERR(split);
root_add_used(root, fs_info->nodesize);
root_add_used_bytes(root);
ASSERT(btrfs_header_level(c) == level);
ret = btrfs_tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
@ -3158,8 +3023,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(split, c_nritems - mid);
btrfs_set_header_nritems(c, mid);
btrfs_mark_buffer_dirty(c);
btrfs_mark_buffer_dirty(split);
btrfs_mark_buffer_dirty(trans, c);
btrfs_mark_buffer_dirty(trans, split);
ret = insert_ptr(trans, path, &disk_key, split->start,
path->slots[level + 1] + 1, level + 1);
@ -3325,15 +3190,15 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(left, left_nritems);
if (left_nritems)
btrfs_mark_buffer_dirty(left);
btrfs_mark_buffer_dirty(trans, left);
else
btrfs_clear_buffer_dirty(trans, left);
btrfs_mark_buffer_dirty(right);
btrfs_mark_buffer_dirty(trans, right);
btrfs_item_key(right, &disk_key, 0);
btrfs_set_node_key(upper, &disk_key, slot + 1);
btrfs_mark_buffer_dirty(upper);
btrfs_mark_buffer_dirty(trans, upper);
/* then fixup the leaf pointer in the path */
if (path->slots[0] >= left_nritems) {
@ -3545,14 +3410,14 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
btrfs_set_token_item_offset(&token, i, push_space);
}
btrfs_mark_buffer_dirty(left);
btrfs_mark_buffer_dirty(trans, left);
if (right_nritems)
btrfs_mark_buffer_dirty(right);
btrfs_mark_buffer_dirty(trans, right);
else
btrfs_clear_buffer_dirty(trans, right);
btrfs_item_key(right, &disk_key, 0);
fixup_low_keys(path, &disk_key, 1);
fixup_low_keys(trans, path, &disk_key, 1);
/* then fixup the leaf pointer in the path */
if (path->slots[0] < push_items) {
@ -3683,8 +3548,8 @@ static noinline int copy_for_split(struct btrfs_trans_handle *trans,
if (ret < 0)
return ret;
btrfs_mark_buffer_dirty(right);
btrfs_mark_buffer_dirty(l);
btrfs_mark_buffer_dirty(trans, right);
btrfs_mark_buffer_dirty(trans, l);
BUG_ON(path->slots[0] != slot);
if (mid <= slot) {
@ -3888,13 +3753,13 @@ again:
* use BTRFS_NESTING_NEW_ROOT.
*/
right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
&disk_key, 0, l->start, 0,
&disk_key, 0, l->start, 0, 0,
num_doubles ? BTRFS_NESTING_NEW_ROOT :
BTRFS_NESTING_SPLIT);
if (IS_ERR(right))
return PTR_ERR(right);
root_add_used(root, fs_info->nodesize);
root_add_used_bytes(root);
if (split == 0) {
if (mid <= slot) {
@ -3925,7 +3790,7 @@ again:
path->nodes[0] = right;
path->slots[0] = 0;
if (path->slots[1] == 0)
fixup_low_keys(path, &disk_key, 1);
fixup_low_keys(trans, path, &disk_key, 1);
}
/*
* We create a new leaf 'right' for the required ins_len and
@ -4024,7 +3889,8 @@ err:
return ret;
}
static noinline int split_item(struct btrfs_path *path,
static noinline int split_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
const struct btrfs_key *new_key,
unsigned long split_offset)
{
@ -4083,7 +3949,7 @@ static noinline int split_item(struct btrfs_path *path,
write_extent_buffer(leaf, buf + split_offset,
btrfs_item_ptr_offset(leaf, slot),
item_size - split_offset);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
BUG_ON(btrfs_leaf_free_space(leaf) < 0);
kfree(buf);
@ -4117,7 +3983,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
if (ret)
return ret;
ret = split_item(path, new_key, split_offset);
ret = split_item(trans, path, new_key, split_offset);
return ret;
}
@ -4127,7 +3993,8 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
* off the end of the item or if we shift the item to chop bytes off
* the front.
*/
void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u32 new_size, int from_end)
{
int slot;
struct extent_buffer *leaf;
@ -4203,11 +4070,11 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
btrfs_set_item_key(leaf, &disk_key, slot);
if (slot == 0)
fixup_low_keys(path, &disk_key, 1);
fixup_low_keys(trans, path, &disk_key, 1);
}
btrfs_set_item_size(leaf, slot, new_size);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (btrfs_leaf_free_space(leaf) < 0) {
btrfs_print_leaf(leaf);
@ -4218,7 +4085,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
/*
* make the item pointed to by the path bigger, data_size is the added size.
*/
void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
void btrfs_extend_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u32 data_size)
{
int slot;
struct extent_buffer *leaf;
@ -4268,7 +4136,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
data_end = old_data;
old_size = btrfs_item_size(leaf, slot);
btrfs_set_item_size(leaf, slot, old_size + data_size);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (btrfs_leaf_free_space(leaf) < 0) {
btrfs_print_leaf(leaf);
@ -4279,6 +4147,7 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
/*
* Make space in the node before inserting one or more items.
*
* @trans: transaction handle
* @root: root we are inserting items to
* @path: points to the leaf/slot where we are going to insert new items
* @batch: information about the batch of items to insert
@ -4286,7 +4155,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
* Main purpose is to save stack depth by doing the bulk of the work in a
* function that doesn't call btrfs_search_slot
*/
static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
static void setup_items_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
const struct btrfs_item_batch *batch)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@ -4306,7 +4176,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
*/
if (path->slots[0] == 0) {
btrfs_cpu_key_to_disk(&disk_key, &batch->keys[0]);
fixup_low_keys(path, &disk_key, 1);
fixup_low_keys(trans, path, &disk_key, 1);
}
btrfs_unlock_up_safe(path, 1);
@ -4365,7 +4235,7 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
}
btrfs_set_header_nritems(leaf, nritems + batch->nr);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (btrfs_leaf_free_space(leaf) < 0) {
btrfs_print_leaf(leaf);
@ -4376,12 +4246,14 @@ static void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *p
/*
* Insert a new item into a leaf.
*
* @trans: Transaction handle.
* @root: The root of the btree.
* @path: A path pointing to the target leaf and slot.
* @key: The key of the new item.
* @data_size: The size of the data associated with the new key.
*/
void btrfs_setup_item_for_insert(struct btrfs_root *root,
void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const struct btrfs_key *key,
u32 data_size)
@ -4393,7 +4265,7 @@ void btrfs_setup_item_for_insert(struct btrfs_root *root,
batch.total_data_size = data_size;
batch.nr = 1;
setup_items_for_insert(root, path, &batch);
setup_items_for_insert(trans, root, path, &batch);
}
/*
@ -4419,7 +4291,7 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
slot = path->slots[0];
BUG_ON(slot < 0);
setup_items_for_insert(root, path, batch);
setup_items_for_insert(trans, root, path, batch);
return 0;
}
@ -4444,7 +4316,7 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
leaf = path->nodes[0];
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, data, ptr, data_size);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
}
btrfs_free_path(path);
return ret;
@ -4475,7 +4347,7 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
return ret;
path->slots[0]++;
btrfs_setup_item_for_insert(root, path, new_key, item_size);
btrfs_setup_item_for_insert(trans, root, path, new_key, item_size);
leaf = path->nodes[0];
memcpy_extent_buffer(leaf,
btrfs_item_ptr_offset(leaf, path->slots[0]),
@ -4533,9 +4405,9 @@ int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_node_key(parent, &disk_key, 0);
fixup_low_keys(path, &disk_key, level + 1);
fixup_low_keys(trans, path, &disk_key, level + 1);
}
btrfs_mark_buffer_dirty(parent);
btrfs_mark_buffer_dirty(trans, parent);
return 0;
}
@ -4567,7 +4439,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
*/
btrfs_unlock_up_safe(path, 0);
root_sub_used(root, leaf->len);
root_sub_used_bytes(root);
atomic_inc(&leaf->refs);
btrfs_free_tree_block(trans, btrfs_root_id(root), leaf, 0, 1);
@ -4632,7 +4504,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_disk_key disk_key;
btrfs_item_key(leaf, &disk_key, 0);
fixup_low_keys(path, &disk_key, 1);
fixup_low_keys(trans, path, &disk_key, 1);
}
/*
@ -4697,11 +4569,11 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
* dirtied this buffer
*/
if (path->nodes[0] == leaf)
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
free_extent_buffer(leaf);
}
} else {
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
}
}
return ret;


@ -6,37 +6,10 @@
#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/completion.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <trace/events/btrfs.h>
#include <asm/unaligned.h>
#include <linux/pagemap.h>
#include <linux/btrfs.h>
#include <linux/btrfs_tree.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/dynamic_debug.h>
#include <linux/refcount.h>
#include <linux/crc32c.h>
#include <linux/iomap.h>
#include <linux/fscrypt.h>
#include "extent-io-tree.h"
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
#include "block-rsv.h"
#include "locking.h"
#include "misc.h"
#include "fs.h"
#include "accessors.h"
struct btrfs_trans_handle;
struct btrfs_transaction;
@ -218,10 +191,22 @@ struct btrfs_root {
atomic_t log_commit[2];
/* Used only for log trees of subvolumes, not for the log root tree */
atomic_t log_batch;
/*
* Protected by the 'log_mutex' lock but can be read without holding
* that lock to avoid unnecessary lock contention, in which case it
* should be read using btrfs_get_root_log_transid(), unless it's a
* log tree, in which case it can be accessed directly. Updates to this
* field should always use btrfs_set_root_log_transid(), except for log
* trees, where the field can be updated directly.
*/
int log_transid;
/* Updated whether or not the commit succeeds. */
int log_transid_committed;
/* Just be updated when the commit succeeds. */
/*
* Only updated when the commit succeeds. Use
* btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
* to access this field.
*/
int last_log_commit;
pid_t log_start_pid;
@ -326,6 +311,9 @@ struct btrfs_root {
/* Used only by log trees, when logging csum items */
struct extent_io_tree log_csum_range;
/* Used in simple quotas, track root during relocation. */
u64 relocation_src_root;
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
u64 alloc_bytenr;
#endif
@ -352,6 +340,26 @@ static inline u64 btrfs_root_id(const struct btrfs_root *root)
return root->root_key.objectid;
}
static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
{
return READ_ONCE(root->log_transid);
}
static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
{
WRITE_ONCE(root->log_transid, log_transid);
}
static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
{
return READ_ONCE(root->last_log_commit);
}
static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
{
WRITE_ONCE(root->last_log_commit, commit_id);
}
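/*
 * Editor's illustrative sketch, not part of the patch: a lockless
 * reader follows the rules documented above by going through the
 * READ_ONCE-based accessors instead of taking 'log_mutex'. The helper
 * name below is hypothetical.
 */
static inline bool btrfs_log_is_ahead_of_commit(const struct btrfs_root *root)
{
	/* Both fields may change concurrently; the accessors avoid torn reads. */
	return btrfs_get_root_log_transid(root) >
	       btrfs_get_root_last_log_commit(root);
}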
/*
* Structure that conveys information about an extent that is going to replace
* all the extents in a file range.
@ -470,30 +478,6 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
((bytes) >> (fs_info)->sectorsize_bits)
static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
{
return crc32c(crc, address, length);
}
static inline void btrfs_crc32c_final(u32 crc, u8 *result)
{
put_unaligned_le32(~crc, result);
}
static inline u64 btrfs_name_hash(const char *name, int len)
{
return crc32c((u32)~1, name, len);
}
/*
* Figure the key offset of an extended inode ref
*/
static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
int len)
{
return (u64) crc32c(parent_objectid, name, len);
}
static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
return mapping_gfp_constraint(mapping, ~__GFP_FS);
@ -513,12 +497,42 @@ int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
const struct btrfs_key *key, int *slot);
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
#ifdef __LITTLE_ENDIAN
/*
* Compare two keys, on little-endian the disk order is same as CPU order and
* we can avoid the conversion.
*/
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
const struct btrfs_key *k2)
{
const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
return btrfs_comp_cpu_keys(k1, k2);
}
#else
/* Compare two keys in a memcmp fashion. */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
const struct btrfs_key *k2)
{
struct btrfs_key k1;
btrfs_disk_key_to_cpu(&k1, disk);
return btrfs_comp_cpu_keys(&k1, k2);
}
#endif
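/*
 * Editor's illustrative sketch, not part of the patch: either variant
 * above lets a caller compare an on-disk key against a CPU-order search
 * key directly; on little-endian hosts this compiles down to a plain
 * comparison with no byte swapping. The helper name is hypothetical.
 */
static inline bool btrfs_disk_key_matches(const struct btrfs_disk_key *disk,
					  const struct btrfs_key *search)
{
	return btrfs_comp_keys(disk, search) == 0;
}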
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
@ -536,6 +550,13 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
enum btrfs_lock_nesting nest);
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer *parent, int parent_slot,
struct extent_buffer **cow_ret,
u64 search_start, u64 empty_size,
enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
@ -545,8 +566,10 @@ int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@ -567,10 +590,6 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
const struct btrfs_key *key,
struct btrfs_path *p, int find_higher,
int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
int start_slot, u64 *last_ret,
struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
@ -610,7 +629,8 @@ struct btrfs_item_batch {
int nr;
};
void btrfs_setup_item_for_insert(struct btrfs_root *root,
void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const struct btrfs_key *key,
u32 data_size);


@ -337,14 +337,119 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
return 0;
}
/*
* Check if two block addresses are close, used by defrag.
*/
static bool close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
if (blocknr < other && other - (blocknr + blocksize) < SZ_32K)
return true;
if (blocknr > other && blocknr - (other + blocksize) < SZ_32K)
return true;
return false;
}
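/*
 * Editor's worked example, not part of the patch: with a 16K blocksize,
 * blocks at 1M and 1M + 20K are close (20K - 16K = 4K gap, under the
 * 32K threshold), while blocks at 1M and 1M + 64K are not (48K gap), so
 * the latter's leaf becomes a relocation candidate below.
 */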
/*
* Go through all the leaves pointed to by a node and reallocate them so that
* disk order is close to key order.
*/
static int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *parent,
int start_slot, u64 *last_ret,
struct btrfs_key *progress)
{
struct btrfs_fs_info *fs_info = root->fs_info;
const u32 blocksize = fs_info->nodesize;
const int end_slot = btrfs_header_nritems(parent) - 1;
u64 search_start = *last_ret;
u64 last_block = 0;
int ret = 0;
bool progress_passed = false;
/*
* COWing must happen through a running transaction, which always
* matches the current fs generation (it's a transaction with a state
* less than TRANS_STATE_UNBLOCKED). If it doesn't, then turn the fs
* into error state to prevent the commit of any transaction.
*/
if (unlikely(trans->transaction != fs_info->running_transaction ||
trans->transid != fs_info->generation)) {
btrfs_abort_transaction(trans, -EUCLEAN);
btrfs_crit(fs_info,
"unexpected transaction when attempting to reallocate parent %llu for root %llu, transaction %llu running transaction %llu fs generation %llu",
parent->start, btrfs_root_id(root), trans->transid,
fs_info->running_transaction->transid,
fs_info->generation);
return -EUCLEAN;
}
if (btrfs_header_nritems(parent) <= 1)
return 0;
for (int i = start_slot; i <= end_slot; i++) {
struct extent_buffer *cur;
struct btrfs_disk_key disk_key;
u64 blocknr;
u64 other;
bool close = true;
btrfs_node_key(parent, &disk_key, i);
if (!progress_passed && btrfs_comp_keys(&disk_key, progress) < 0)
continue;
progress_passed = true;
blocknr = btrfs_node_blockptr(parent, i);
if (last_block == 0)
last_block = blocknr;
if (i > 0) {
other = btrfs_node_blockptr(parent, i - 1);
close = close_blocks(blocknr, other, blocksize);
}
if (!close && i < end_slot) {
other = btrfs_node_blockptr(parent, i + 1);
close = close_blocks(blocknr, other, blocksize);
}
if (close) {
last_block = blocknr;
continue;
}
cur = btrfs_read_node_slot(parent, i);
if (IS_ERR(cur))
return PTR_ERR(cur);
if (search_start == 0)
search_start = last_block;
btrfs_tree_lock(cur);
ret = btrfs_force_cow_block(trans, root, cur, parent, i,
&cur, search_start,
min(16 * blocksize,
(end_slot - i) * blocksize),
BTRFS_NESTING_COW);
if (ret) {
btrfs_tree_unlock(cur);
free_extent_buffer(cur);
break;
}
search_start = cur->start;
last_block = cur->start;
*last_ret = search_start;
btrfs_tree_unlock(cur);
free_extent_buffer(cur);
}
return ret;
}
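/*
 * Editor's note with a worked example, not part of the patch: the
 * empty_size hint passed to btrfs_force_cow_block() above is
 * min(16 * blocksize, (end_slot - i) * blocksize), so with a 16K
 * nodesize the allocator is asked to keep up to 256K free past
 * search_start, enough to place a window of 16 sibling leaves close
 * together on disk.
 */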
/*
* Defrag all the leaves in a given btree.
* Read all the leaves and try to get key order to
* better reflect disk order
*/
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
static int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_path *path = NULL;
struct btrfs_key key;
@ -460,6 +565,45 @@ done:
return ret;
}
/*
* Defrag a given btree. Every leaf in the btree is read and defragmented.
*/
int btrfs_defrag_root(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
return 0;
while (1) {
struct btrfs_trans_handle *trans;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
break;
}
ret = btrfs_defrag_leaves(trans, root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
cond_resched();
if (btrfs_fs_closing(fs_info) || ret != -EAGAIN)
break;
if (btrfs_defrag_cancelled(fs_info)) {
btrfs_debug(fs_info, "defrag_root cancelled");
ret = -EAGAIN;
break;
}
}
clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
return ret;
}
/*
* Defrag specific helper to get an extent map.
*
@ -891,8 +1035,8 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
* very likely resulting in a larger extent after writeback is
* triggered (except in a case of free space fragmentation).
*/
if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
EXTENT_DELALLOC, 0, NULL))
if (test_range_bit_exists(&inode->io_tree, cur, cur + range_len - 1,
EXTENT_DELALLOC))
goto next;
/*


@ -12,7 +12,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u32 extent_thresh);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
{


@ -322,9 +322,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
} else {
if (current->journal_info)
flush = BTRFS_RESERVE_FLUSH_LIMIT;
if (btrfs_transaction_in_commit(fs_info))
schedule_timeout(1);
}
num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
@ -346,7 +343,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
noflush);
if (ret)
return ret;
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, meta_reserve, flush);
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
meta_reserve, flush);
if (ret) {
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
return ret;


@ -328,7 +328,8 @@ static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
}
/*
* __btrfs_lookup_delayed_item - look up the delayed item by key
* Look up the delayed item by key.
*
* @delayed_node: pointer to the delayed node
* @index: the dir index value to lookup (offset of a dir index key)
*
@ -517,7 +518,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
/*
* For insertions we track reserved metadata space by accounting
* for the number of leaves that will be used, based on the delayed
* node's index_items_size field.
* node's curr_index_batch_size and index_item_leaves fields.
*/
if (item->type == BTRFS_DELAYED_DELETION_ITEM)
item->bytes_reserved = num_bytes;
@ -1030,7 +1031,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode_item);
write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
sizeof(struct btrfs_inode_item));
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
goto out;
@ -1378,8 +1379,7 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
return -ENOMEM;
async_work->delayed_root = delayed_root;
btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
NULL);
btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL);
async_work->nr = nr;
btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
@ -1760,8 +1760,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
}
/*
* btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
*
* Read dir info stored in the delayed tree.
*/
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
struct list_head *ins_list)
@ -1848,10 +1847,8 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_stack_timespec_nsec(&inode_item->ctime,
inode_get_ctime_nsec(inode));
btrfs_set_stack_timespec_sec(&inode_item->otime,
BTRFS_I(inode)->i_otime.tv_sec);
btrfs_set_stack_timespec_nsec(&inode_item->otime,
BTRFS_I(inode)->i_otime.tv_nsec);
btrfs_set_stack_timespec_sec(&inode_item->otime, BTRFS_I(inode)->i_otime_sec);
btrfs_set_stack_timespec_nsec(&inode_item->otime, BTRFS_I(inode)->i_otime_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
@ -1900,10 +1897,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
inode_set_ctime(inode, btrfs_stack_timespec_sec(&inode_item->ctime),
btrfs_stack_timespec_nsec(&inode_item->ctime));
BTRFS_I(inode)->i_otime.tv_sec =
btrfs_stack_timespec_sec(&inode_item->otime);
BTRFS_I(inode)->i_otime.tv_nsec =
btrfs_stack_timespec_nsec(&inode_item->otime);
BTRFS_I(inode)->i_otime_sec = btrfs_stack_timespec_sec(&inode_item->otime);
BTRFS_I(inode)->i_otime_nsec = btrfs_stack_timespec_nsec(&inode_item->otime);
inode->i_generation = BTRFS_I(inode)->generation;
BTRFS_I(inode)->index_cnt = (u64)-1;
@ -1914,9 +1909,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_delayed_node *delayed_node;
int ret = 0;


@ -135,7 +135,6 @@ int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_inode *inode);
int btrfs_fill_inode(struct inode *inode, u32 *rdev);
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode);


@ -57,16 +57,20 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
* Release a ref head's reservation.
*
* @fs_info: the filesystem
* @nr: number of items to drop
* @nr_refs: number of delayed refs to drop
* @nr_csums: number of csum items to drop
*
* Drops the delayed ref head's count from the delayed refs rsv and free any
* excess reservation we had.
*/
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr);
u64 released = 0;
u64 num_bytes;
u64 released;
num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
if (released)
@ -77,26 +81,118 @@ void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
/*
* Adjust the size of the delayed refs rsv.
*
* This is to be called anytime we may have adjusted trans->delayed_ref_updates,
* it'll calculate the additional size and add it to the delayed_refs_rsv.
* This is to be called anytime we may have adjusted trans->delayed_ref_updates
* or trans->delayed_ref_csum_deletions, it'll calculate the additional size and
* add it to the delayed_refs_rsv.
*/
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
u64 num_bytes;
u64 reserved_bytes;
if (!trans->delayed_ref_updates)
num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
trans->delayed_ref_csum_deletions);
if (num_bytes == 0)
return;
num_bytes = btrfs_calc_delayed_ref_bytes(fs_info,
trans->delayed_ref_updates);
/*
* Try to take num_bytes from the transaction's local delayed reserve.
* If that is not possible, take as much as is available. If the local
* reserve doesn't have enough reserved space, the delayed refs reserve
* will be refilled the next time btrfs_delayed_refs_rsv_refill() is
* called, or, if a transaction commit is triggered before that, the
* global block reserve will be used. We want to minimize use of the
* global block reserve for cases we can account for in advance, to
* avoid exhausting it and reaching -ENOSPC during a transaction commit.
*/
spin_lock(&local_rsv->lock);
reserved_bytes = min(num_bytes, local_rsv->reserved);
local_rsv->reserved -= reserved_bytes;
local_rsv->full = (local_rsv->reserved >= local_rsv->size);
spin_unlock(&local_rsv->lock);
spin_lock(&delayed_rsv->lock);
delayed_rsv->size += num_bytes;
delayed_rsv->full = false;
delayed_rsv->reserved += reserved_bytes;
delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
spin_unlock(&delayed_rsv->lock);
trans->delayed_ref_updates = 0;
trans->delayed_ref_csum_deletions = 0;
}
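/*
 * Editor's worked example, not part of the patch: if num_bytes is 96K
 * and the transaction's local reserve holds only 64K, those 64K migrate
 * (local_rsv->reserved drops 64K -> 0) while delayed_rsv->size grows by
 * the full 96K and delayed_rsv->reserved by just 64K. The 32K shortfall
 * is made up later by btrfs_delayed_refs_rsv_refill() or, at commit
 * time, by the global block reserve.
 */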
/*
* Adjust the size of the delayed refs block reserve for 1 block group item
* insertion, used after allocating a block group.
*/
void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
spin_lock(&delayed_rsv->lock);
/*
* Inserting a block group item does not require changing the free space
* tree, only the extent tree or the block group tree, so this is all we
* need.
*/
delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
delayed_rsv->full = false;
spin_unlock(&delayed_rsv->lock);
}
/*
* Adjust the size of the delayed refs block reserve to release space for 1
* block group item insertion.
*/
void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
u64 released;
released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
if (released > 0)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
0, released, 0);
}
/*
* Adjust the size of the delayed refs block reserve for 1 block group item
* update.
*/
void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
spin_lock(&delayed_rsv->lock);
/*
* Updating a block group item does not result in new nodes/leaves and
* does not require changing the free space tree, only the extent tree
* or the block group tree, so this is all we need.
*/
delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
delayed_rsv->full = false;
spin_unlock(&delayed_rsv->lock);
}
/*
* Adjust the size of the delayed refs block reserve to release space for 1
* block group item update.
*/
void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
u64 released;
released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
if (released > 0)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
0, released, 0);
}
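/*
 * Editor's worked example, not part of the patch: with a 16K nodesize
 * and BTRFS_MAX_LEVEL of 8, a block group item insertion sizes the
 * reserve by btrfs_calc_insert_metadata_size(fs_info, 1) = 16K * 8 * 2
 * = 256K, while an update needs only btrfs_calc_metadata_size(fs_info,
 * 1) = 16K * 8 = 128K, since updates never add new nodes or leaves.
 */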
/*
@ -154,6 +250,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
struct btrfs_space_info *space_info = block_rsv->space_info;
u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
u64 num_bytes = 0;
u64 refilled_bytes;
@ -170,7 +267,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
if (!num_bytes)
return 0;
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
if (ret)
return ret;
@ -199,8 +296,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
spin_unlock(&block_rsv->lock);
if (to_free > 0)
btrfs_space_info_free_bytes_may_use(fs_info, block_rsv->space_info,
to_free);
btrfs_space_info_free_bytes_may_use(fs_info, space_info, to_free);
if (refilled_bytes > 0)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
@ -422,7 +518,8 @@ int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
return 0;
}
static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs,
static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *ref)
{
@ -433,9 +530,11 @@ static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs,
list_del(&ref->add_list);
btrfs_put_delayed_ref(ref);
atomic_dec(&delayed_refs->num_entries);
btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
}
static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs,
static bool merge_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *ref,
u64 seq)
@ -464,10 +563,10 @@ static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs,
mod = -next->ref_mod;
}
drop_delayed_ref(delayed_refs, head, next);
drop_delayed_ref(fs_info, delayed_refs, head, next);
ref->ref_mod += mod;
if (ref->ref_mod == 0) {
drop_delayed_ref(delayed_refs, head, ref);
drop_delayed_ref(fs_info, delayed_refs, head, ref);
done = true;
} else {
/*
@ -505,7 +604,7 @@ again:
ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
if (seq && ref->seq >= seq)
continue;
if (merge_ref(delayed_refs, head, ref, seq))
if (merge_ref(fs_info, delayed_refs, head, ref, seq))
goto again;
}
}
@ -584,10 +683,11 @@ void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
* Return true if the ref was merged into an existing one (and therefore can be
* freed by the caller).
*/
static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root,
static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *href,
struct btrfs_delayed_ref_node *ref)
{
struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
struct btrfs_delayed_ref_node *exist;
int mod;
@ -598,6 +698,7 @@ static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root,
list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
spin_unlock(&href->lock);
trans->delayed_ref_updates++;
return false;
}
@ -626,7 +727,7 @@ static bool insert_delayed_ref(struct btrfs_delayed_ref_root *root,
/* remove existing tail if its ref_mod is zero */
if (exist->ref_mod == 0)
drop_delayed_ref(root, href, exist);
drop_delayed_ref(trans->fs_info, root, href, exist);
spin_unlock(&href->lock);
return true;
}
@ -647,6 +748,15 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
BUG_ON(existing->is_data != update->is_data);
spin_lock(&existing->lock);
/*
* When freeing an extent, we may not know the owning root when we
* first create the head_ref. However, some deref before the last deref
* will know it, so we just need to update the head_ref accordingly.
*/
if (!existing->owning_root)
existing->owning_root = update->owning_root;
if (update->must_insert_reserved) {
/* if the extent was freed and then
* reallocated before the delayed ref
@ -656,6 +766,7 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
* Set it again here
*/
existing->must_insert_reserved = update->must_insert_reserved;
existing->owning_root = update->owning_root;
/*
* update the num_bytes so we make sure the accounting
@ -695,6 +806,8 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
/*
* If we are going from a positive ref mod to a negative one, or vice
* versa, we need to make sure to adjust pending_csums accordingly.
* We reserve bytes for csum deletion when adding or updating a ref head
* see add_delayed_ref_head() for more details.
*/
if (existing->is_data) {
u64 csum_leaves =
@ -703,11 +816,11 @@ static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
delayed_refs->pending_csums -= existing->num_bytes;
btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
}
if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
delayed_refs->pending_csums += existing->num_bytes;
trans->delayed_ref_updates += csum_leaves;
trans->delayed_ref_csum_deletions += csum_leaves;
}
}
@ -718,7 +831,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
struct btrfs_qgroup_extent_record *qrecord,
u64 bytenr, u64 num_bytes, u64 ref_root,
u64 reserved, int action, bool is_data,
bool is_system)
bool is_system, u64 owning_root)
{
int count_mod = 1;
bool must_insert_reserved = false;
@ -758,7 +871,9 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
head_ref->bytenr = bytenr;
head_ref->num_bytes = num_bytes;
head_ref->ref_mod = count_mod;
head_ref->reserved_bytes = reserved;
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->owning_root = owning_root;
head_ref->is_data = is_data;
head_ref->is_system = is_system;
head_ref->ref_tree = RB_ROOT_CACHED;
@ -819,16 +934,21 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
head_ref = existing;
} else {
/*
* We reserve the amount of bytes needed to delete csums when
* adding the ref head and not when adding individual drop refs
* since the csum items are deleted only after running the last
* delayed drop ref (the data extent's ref count drops to 0).
*/
if (head_ref->is_data && head_ref->ref_mod < 0) {
delayed_refs->pending_csums += head_ref->num_bytes;
trans->delayed_ref_updates +=
trans->delayed_ref_csum_deletions +=
btrfs_csum_bytes_to_leaves(trans->fs_info,
head_ref->num_bytes);
}
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
atomic_inc(&delayed_refs->num_entries);
trans->delayed_ref_updates++;
}
if (qrecord_inserted_ret)
*qrecord_inserted_ret = qrecord_inserted;
@ -837,8 +957,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
}
/*
* init_delayed_ref_common - Initialize the structure which represents a
* modification to a an extent.
* Initialize the structure which represents a modification to an extent.
*
* @fs_info: Internal to the mounted filesystem mount structure.
*
@ -909,7 +1028,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
u64 parent = generic_ref->parent;
u8 ref_type;
is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);
is_system = (generic_ref->tree_ref.ref_root == BTRFS_CHUNK_TREE_OBJECTID);
ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
@ -922,8 +1041,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
return -ENOMEM;
}
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
!generic_ref->skip_qgroup) {
if (btrfs_qgroup_enabled(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) {
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
@ -938,15 +1056,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
ref_type = BTRFS_TREE_BLOCK_REF_KEY;
init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
generic_ref->tree_ref.owning_root, action,
generic_ref->tree_ref.ref_root, action,
ref_type);
ref->root = generic_ref->tree_ref.owning_root;
ref->root = generic_ref->tree_ref.ref_root;
ref->parent = parent;
ref->level = level;
init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
generic_ref->tree_ref.owning_root, 0, action,
false, is_system);
generic_ref->tree_ref.ref_root, 0, action,
false, is_system, generic_ref->owning_root);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
@ -959,7 +1077,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
head_ref = add_delayed_ref_head(trans, head_ref, record,
action, &qrecord_inserted);
merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
merged = insert_delayed_ref(trans, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
/*
@ -998,7 +1116,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
u64 bytenr = generic_ref->bytenr;
u64 num_bytes = generic_ref->len;
u64 parent = generic_ref->parent;
u64 ref_root = generic_ref->data_ref.owning_root;
u64 ref_root = generic_ref->data_ref.ref_root;
u64 owner = generic_ref->data_ref.ino;
u64 offset = generic_ref->data_ref.offset;
u8 ref_type;
@ -1026,8 +1144,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
return -ENOMEM;
}
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
!generic_ref->skip_qgroup) {
if (btrfs_qgroup_enabled(fs_info) && !generic_ref->skip_qgroup) {
record = kzalloc(sizeof(*record), GFP_NOFS);
if (!record) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
@ -1038,7 +1155,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
}
init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
reserved, action, true, false);
reserved, action, true, false, generic_ref->owning_root);
head_ref->extent_op = NULL;
delayed_refs = &trans->transaction->delayed_refs;
@ -1051,7 +1168,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
head_ref = add_delayed_ref_head(trans, head_ref, record,
action, &qrecord_inserted);
merged = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
merged = insert_delayed_ref(trans, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
/*
@ -1084,7 +1201,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
return -ENOMEM;
init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
BTRFS_UPDATE_DELAYED_HEAD, false, false);
BTRFS_UPDATE_DELAYED_HEAD, false, false, 0);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;


@ -9,10 +9,16 @@
#include <linux/refcount.h>
/* these are the possible values of struct btrfs_delayed_ref_node->action */
#define BTRFS_ADD_DELAYED_REF 1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF 2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
enum btrfs_delayed_ref_action {
/* Add one backref to the tree */
BTRFS_ADD_DELAYED_REF = 1,
/* Delete one backref from the tree */
BTRFS_DROP_DELAYED_REF,
/* Record a full extent allocation */
BTRFS_ADD_DELAYED_EXTENT,
/* Not changing ref count on head ref */
BTRFS_UPDATE_DELAYED_HEAD,
} __packed;
struct btrfs_delayed_ref_node {
struct rb_node ref_node;
@ -104,6 +110,18 @@ struct btrfs_delayed_ref_head {
*/
int ref_mod;
/*
* The root that triggered the allocation when must_insert_reserved is
* set to true.
*/
u64 owning_root;
/*
* Track reserved bytes when setting must_insert_reserved. On success
* or cleanup, we will need to free the reservation.
*/
u64 reserved_bytes;
/*
* when a new extent is allocated, it is just reserved in memory
* The actual extent isn't inserted into the extent allocation tree
@ -117,6 +135,7 @@ struct btrfs_delayed_ref_head {
* the free has happened.
*/
bool must_insert_reserved;
bool is_data;
bool is_system;
bool processing;
@ -183,13 +202,13 @@ enum btrfs_ref_type {
BTRFS_REF_DATA,
BTRFS_REF_METADATA,
BTRFS_REF_LAST,
};
} __packed;
struct btrfs_data_ref {
/* For EXTENT_DATA_REF */
/* Original root this data extent belongs to */
u64 owning_root;
/* Root which owns this data reference. */
u64 ref_root;
/* Inode which refers to this data extent */
u64 ino;
@ -212,18 +231,18 @@ struct btrfs_tree_ref {
int level;
/*
* Root which owns this tree block.
* Root which owns this tree block reference.
*
* For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
*/
u64 owning_root;
u64 ref_root;
/* For non-skinny metadata, no special member needed */
};
struct btrfs_ref {
enum btrfs_ref_type type;
int action;
enum btrfs_delayed_ref_action action;
/*
* Whether this extent should go through qgroup record.
@ -239,6 +258,7 @@ struct btrfs_ref {
#endif
u64 bytenr;
u64 len;
u64 owning_root;
/* Bytenr of the parent tree block */
u64 parent;
@ -277,24 +297,37 @@ static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_in
return num_bytes;
}
static inline u64 btrfs_calc_delayed_ref_csum_bytes(const struct btrfs_fs_info *fs_info,
int num_csum_items)
{
/*
* Deleting csum items does not result in new nodes/leaves and does not
* require changing the free space tree, only the csum tree, so this is
* all we need.
*/
return btrfs_calc_metadata_size(fs_info, num_csum_items);
}
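/*
 * Editor's illustrative sketch, not part of the patch: sizing the
 * reserve for a pending data extent drop combines both calculators, as
 * btrfs_delayed_refs_rsv_release() does. The helper below is
 * hypothetical.
 */
static inline u64 btrfs_calc_extent_drop_bytes(const struct btrfs_fs_info *fs_info,
					       int nr_refs, int nr_csum_leaves)
{
	return btrfs_calc_delayed_ref_bytes(fs_info, nr_refs) +
	       btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csum_leaves);
}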
static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
int action, u64 bytenr, u64 len, u64 parent)
int action, u64 bytenr, u64 len,
u64 parent, u64 owning_root)
{
generic_ref->action = action;
generic_ref->bytenr = bytenr;
generic_ref->len = len;
generic_ref->parent = parent;
generic_ref->owning_root = owning_root;
}
static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
int level, u64 root, u64 mod_root, bool skip_qgroup)
static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level,
u64 root, u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
/* If @real_root not set, use @root as fallback */
generic_ref->real_root = mod_root ?: root;
#endif
generic_ref->tree_ref.level = level;
generic_ref->tree_ref.owning_root = root;
generic_ref->tree_ref.ref_root = root;
generic_ref->type = BTRFS_REF_METADATA;
if (skip_qgroup || !(is_fstree(root) &&
(!mod_root || is_fstree(mod_root))))
@ -312,7 +345,7 @@ static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
/* If @real_root not set, use @root as fallback */
generic_ref->real_root = mod_root ?: ref_root;
#endif
generic_ref->data_ref.owning_root = ref_root;
generic_ref->data_ref.ref_root = ref_root;
generic_ref->data_ref.ino = ino;
generic_ref->data_ref.offset = offset;
generic_ref->type = BTRFS_REF_DATA;
@ -338,7 +371,6 @@ btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
WARN_ON(refcount_read(&ref->refs) == 0);
if (refcount_dec_and_test(&ref->refs)) {
WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
switch (ref->type) {
@ -402,8 +434,12 @@ struct btrfs_delayed_ref_head *btrfs_select_ref_head(
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq);
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums);
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info);
void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,


@ -17,7 +17,6 @@
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "zoned.h"
@ -444,7 +443,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans)
dev_replace->item_needs_writeback = 0;
up_write(&dev_replace->rwsem);
btrfs_mark_buffer_dirty(eb);
btrfs_mark_buffer_dirty(trans, eb);
out:
btrfs_free_path(path);


@ -38,7 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
btrfs_extend_item(path, data_size);
btrfs_extend_item(trans, path, data_size);
} else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
@ -93,7 +93,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, name, name_ptr, name_len);
write_extent_buffer(leaf, data, data_ptr, data_len);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
return ret;
}
@ -153,7 +153,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
name_ptr = (unsigned long)(dir_item + 1);
write_extent_buffer(leaf, name->name, name_ptr, name->len);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
second_insert:
/* FIXME, use some real flag for selecting the extra index */
@ -439,7 +439,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
btrfs_truncate_item(path, item_len - sub_item_len, 1);
btrfs_truncate_item(trans, path, item_len - sub_item_len, 1);
}
return ret;
}


@ -3,6 +3,10 @@
#ifndef BTRFS_DIR_ITEM_H
#define BTRFS_DIR_ITEM_H
#include <linux/crc32c.h>
struct fscrypt_str;
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
const struct fscrypt_str *name);
int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
@ -39,4 +43,9 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
const char *name,
int name_len);
static inline u64 btrfs_name_hash(const char *name, int len)
{
return crc32c((u32)~1, name, len);
}
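/*
 * Editor's illustrative sketch, not part of the patch: directory items
 * are keyed by this hash, e.g. when building a lookup key. The helper
 * name is hypothetical.
 */
static inline void btrfs_dir_item_key_example(struct btrfs_key *key, u64 dir_ino,
					      const char *name, int len)
{
	key->objectid = dir_ino;
	key->type = BTRFS_DIR_ITEM_KEY;
	key->offset = btrfs_name_hash(name, len);
}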
#endif


@ -29,7 +29,6 @@
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
@ -245,6 +244,7 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
struct extent_buffer *eb = bbio->private;
struct btrfs_fs_info *fs_info = eb->fs_info;
u64 found_start = btrfs_header_bytenr(eb);
u64 last_trans;
u8 result[BTRFS_CSUM_SIZE];
int ret;
@ -282,12 +282,12 @@ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
* Also check the generation, the eb reached here must be newer than
* last committed. Or something seriously wrong happened.
*/
if (unlikely(btrfs_header_generation(eb) <= fs_info->last_trans_committed)) {
last_trans = btrfs_get_last_trans_committed(fs_info);
if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
ret = -EUCLEAN;
btrfs_err(fs_info,
"block=%llu bad generation, have %llu expect > %llu",
eb->start, btrfs_header_generation(eb),
fs_info->last_trans_committed);
eb->start, btrfs_header_generation(eb), last_trans);
goto error;
}
write_extent_buffer(eb, result, 0, fs_info->csum_size);
@ -318,9 +318,10 @@ static bool check_tree_block_fsid(struct extent_buffer *eb)
BTRFS_FSID_SIZE);
/*
* alloc_fs_devices() copies the fsid into metadata_uuid if the
* metadata_uuid is unset in the superblock, including for a seed device.
* So, we can use fs_devices->metadata_uuid.
* alloc_fs_devices() copies the fsid into fs_devices::metadata_uuid.
* This is then overwritten by metadata_uuid if it is present in
* device_list_add(). The same is true for a seed device. So the use of
* fs_devices::metadata_uuid is appropriate here.
*/
if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
return false;
@ -675,9 +676,9 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
refcount_set(&root->refs, 1);
atomic_set(&root->snapshot_force_cow, 0);
atomic_set(&root->nr_swapfiles, 0);
root->log_transid = 0;
btrfs_set_root_log_transid(root, 0);
root->log_transid_committed = -1;
root->last_log_commit = 0;
btrfs_set_root_last_log_commit(root, 0);
root->anon_dev = 0;
if (!dummy) {
extent_io_tree_init(fs_info, &root->dirty_log_pages,
@ -859,7 +860,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
root->root_key.offset = 0;
leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
BTRFS_NESTING_NORMAL);
0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
leaf = NULL;
@ -867,7 +868,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
}
root->node = leaf;
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
root->commit_root = btrfs_root_node(root);
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
@ -936,13 +937,13 @@ int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
*/
leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf))
return PTR_ERR(leaf);
root->node = leaf;
btrfs_mark_buffer_dirty(root->node);
btrfs_mark_buffer_dirty(trans, root->node);
btrfs_tree_unlock(root->node);
return 0;
@ -1004,9 +1005,9 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
WARN_ON(root->log_root);
root->log_root = log_root;
root->log_transid = 0;
btrfs_set_root_log_transid(root, 0);
root->log_transid_committed = -1;
root->last_log_commit = 0;
btrfs_set_root_last_log_commit(root, 0);
return 0;
}
@ -1179,6 +1180,8 @@ static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
return btrfs_grab_root(fs_info->block_group_root);
case BTRFS_FREE_SPACE_TREE_OBJECTID:
return btrfs_grab_root(btrfs_global_root(fs_info, &key));
case BTRFS_RAID_STRIPE_TREE_OBJECTID:
return btrfs_grab_root(fs_info->stripe_root);
default:
return NULL;
}
@ -1259,6 +1262,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
btrfs_put_root(fs_info->fs_root);
btrfs_put_root(fs_info->data_reloc_root);
btrfs_put_root(fs_info->block_group_root);
btrfs_put_root(fs_info->stripe_root);
btrfs_check_leaked_roots(fs_info);
btrfs_extent_buffer_leak_debug_check(fs_info);
kfree(fs_info->super_copy);
@ -1402,7 +1406,8 @@ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
}
/*
* btrfs_get_fs_root_commit_root - return a root for the given objectid
* Return a root for the given objectid.
*
* @fs_info: the fs_info
* @objectid: the objectid we need to lookup
*
@ -1699,11 +1704,11 @@ static void backup_super_roots(struct btrfs_fs_info *info)
}
/*
* read_backup_root - Reads a backup root based on the passed priority. Prio 0
* is the newest, prio 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
* Reads a backup root based on the passed priority. Prio 0 is the newest, prio
* 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
*
* fs_info - filesystem whose backup roots need to be read
* priority - priority of backup root required
* @fs_info: filesystem whose backup roots need to be read
* @priority: priority of backup root required
*
* Returns backup root index on success and -EINVAL otherwise.
*/
@ -1803,6 +1808,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
free_root_extent_buffers(info->fs_root);
free_root_extent_buffers(info->data_reloc_root);
free_root_extent_buffers(info->block_group_root);
free_root_extent_buffers(info->stripe_root);
if (free_chunk_root)
free_root_extent_buffers(info->chunk_root);
}
@ -2262,7 +2268,6 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
root = btrfs_read_tree_root(tree_root, &location);
if (!IS_ERR(root)) {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
fs_info->quota_root = root;
}
@ -2279,6 +2284,20 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
fs_info->uuid_root = root;
}
if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location);
if (IS_ERR(root)) {
if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
ret = PTR_ERR(root);
goto out;
}
} else {
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
fs_info->stripe_root = root;
}
}
return 0;
out:
btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
@ -2381,7 +2400,8 @@ int btrfs_validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
if (memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
if (!fs_info->fs_devices->temp_fsid &&
memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
"superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
sb->fsid, fs_info->fs_devices->fsid);
@ -2634,7 +2654,7 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
/* All successful */
fs_info->generation = btrfs_header_generation(tree_root->node);
fs_info->last_trans_committed = fs_info->generation;
btrfs_set_last_trans_committed(fs_info, fs_info->generation);
fs_info->last_reloc_trans = 0;
/* Always begin writing backup roots after the one being used */
@ -2735,9 +2755,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
spin_lock_init(&fs_info->ordered_root_lock);
btrfs_init_scrub(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
fs_info->check_integrity_print_mask = 0;
#endif
btrfs_init_balance(fs_info);
btrfs_init_async_reclaim_work(fs_info);
@ -3157,7 +3174,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
u32 nodesize;
u32 stripesize;
u64 generation;
u64 features;
u16 csum_type;
struct btrfs_super_block *disk_super;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@ -3239,15 +3255,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
disk_super = fs_info->super_copy;
features = btrfs_super_flags(disk_super);
if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
btrfs_set_super_flags(disk_super, features);
btrfs_info(fs_info,
"found metadata UUID change in progress flag, clearing");
}
memcpy(fs_info->super_for_commit, fs_info->super_copy,
sizeof(*fs_info->super_for_commit));
@ -3509,18 +3516,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
"auto enabling async discard");
}
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
ret = btrfsic_mount(fs_info, fs_devices,
btrfs_test_opt(fs_info,
CHECK_INTEGRITY_DATA) ? 1 : 0,
fs_info->check_integrity_print_mask);
if (ret)
btrfs_warn(fs_info,
"failed to initialize integrity check module: %d",
ret);
}
#endif
ret = btrfs_read_qgroup_config(fs_info);
if (ret)
goto fail_trans_kthread;
@ -3820,8 +3815,6 @@ static int write_dev_supers(struct btrfs_device *device,
*/
if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
bio->bi_opf |= REQ_FUA;
btrfsic_check_bio(bio);
submit_bio(bio);
if (btrfs_advance_sb_log(device, i))
@ -3917,28 +3910,11 @@ static void write_dev_flush(struct btrfs_device *device)
device->last_flush_error = BLK_STS_OK;
#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
/*
* When a disk has write caching disabled, we skip submission of a bio
* with flush and sync requests before writing the superblock, since
* it's not needed. However when the integrity checker is enabled, this
* results in reports that there are metadata blocks referred by a
* superblock that were not properly flushed. So don't skip the bio
* submission only when the integrity checker is enabled for the sake
* of simplicity, since this is a debug tool and not meant for use in
* non-debug builds.
*/
if (!bdev_write_cache(device->bdev))
return;
#endif
bio_init(bio, device->bdev, NULL, 0,
REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
bio->bi_end_io = btrfs_end_empty_barrier;
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
btrfsic_check_bio(bio);
submit_bio(bio);
set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
}
@ -4414,16 +4390,12 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
iput(fs_info->btree_inode);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
btrfsic_unmount(fs_info->fs_devices);
#endif
btrfs_mapping_tree_free(&fs_info->mapping_tree);
btrfs_close_devices(fs_info->fs_devices);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
struct extent_buffer *buf)
{
struct btrfs_fs_info *fs_info = buf->fs_info;
u64 transid = btrfs_header_generation(buf);
@ -4437,21 +4409,16 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
return;
#endif
/* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
ASSERT(trans->transid == fs_info->generation);
btrfs_assert_tree_write_locked(buf);
if (transid != fs_info->generation)
WARN(1, KERN_CRIT "btrfs transid mismatch buffer %llu, found %llu running %llu\n",
buf->start, transid, fs_info->generation);
set_extent_buffer_dirty(buf);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
/*
* btrfs_check_leaf() won't check item data if we don't have WRITTEN
* set, so this will only validate the basic structure of the items.
*/
if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(buf)) {
btrfs_print_leaf(buf);
ASSERT(0);
if (unlikely(transid != fs_info->generation)) {
btrfs_abort_transaction(trans, -EUCLEAN);
btrfs_crit(fs_info,
"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
buf->start, transid, fs_info->generation);
}
#endif
set_extent_buffer_dirty(buf);
}
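
The hunk above threads the transaction handle into btrfs_mark_buffer_dirty() so that a stale-transid buffer aborts the transaction instead of only warning; btrfs_truncate_item(), btrfs_extend_item() and btrfs_set_item_key_safe() pick up the same extra parameter in later hunks. A minimal sketch of the updated calling convention (the caller name is illustrative, not from this patch):

        static void example_update_leaf(struct btrfs_trans_handle *trans,
                                        struct extent_buffer *leaf)
        {
                /* ... modify one or more items in the leaf ... */

                /* was: btrfs_mark_buffer_dirty(leaf); */
                btrfs_mark_buffer_dirty(trans, leaf);
        }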
static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
@ -4611,6 +4578,7 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
list_del(&ref->add_list);
atomic_dec(&delayed_refs->num_entries);
btrfs_put_delayed_ref(ref);
btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
}
if (head->must_insert_reserved)
pin_bytes = true;
@ -4808,7 +4776,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
spin_unlock(&cur_trans->dirty_bgs_lock);
btrfs_put_block_group(cache);
btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
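
Block group updates now have a dedicated counter inside the delayed refs reserve, which is why the cleanup path above switches from the generic release to btrfs_dec_delayed_refs_rsv_bg_updates(). A hedged before/after of the accounting calls as they appear in these hunks:

        /* before: block group updates shared the generic delayed ref count */
        btrfs_delayed_refs_rsv_release(fs_info, 1);

        /* after: block group updates are released through their own counter,
         * while plain delayed ref heads pass an explicit bg_updates count */
        btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
        btrfs_delayed_refs_rsv_release(fs_info, 1, 0);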


@ -104,7 +104,8 @@ static inline struct btrfs_root *btrfs_grab_root(struct btrfs_root *root)
}
void btrfs_put_root(struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
int atomic);
int btrfs_read_extent_buffer(struct extent_buffer *buf,


@ -105,32 +105,40 @@ void extent_io_tree_init(struct btrfs_fs_info *fs_info,
lockdep_set_class(&tree->lock, &file_extent_tree_class);
}
/*
* Empty an io tree, removing and freeing every extent state record from the
* tree. This should be called once we are sure no other task can access the
* tree anymore, so no tree updates happen after we empty the tree and there
* aren't any waiters on any extent state record (EXTENT_LOCKED bit is never
* set on any extent state when calling this function).
*/
void extent_io_tree_release(struct extent_io_tree *tree)
{
spin_lock(&tree->lock);
/*
* Do a single barrier for the waitqueue_active check here, the state
* of the waitqueue should not change once extent_io_tree_release is
* called.
*/
smp_mb();
while (!RB_EMPTY_ROOT(&tree->state)) {
struct rb_node *node;
struct extent_state *state;
struct rb_root root;
struct extent_state *state;
struct extent_state *tmp;
node = rb_first(&tree->state);
state = rb_entry(node, struct extent_state, rb_node);
rb_erase(&state->rb_node, &tree->state);
spin_lock(&tree->lock);
root = tree->state;
tree->state = RB_ROOT;
rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) {
/* Clear node to keep free_extent_state() happy. */
RB_CLEAR_NODE(&state->rb_node);
ASSERT(!(state->state & EXTENT_LOCKED));
/*
* btree io trees aren't supposed to have tasks waiting for
* changes in the flags of extent states ever.
* No need for a memory barrier here, as we are holding the tree
* lock and we only change the waitqueue while holding that lock
* (see wait_extent_bit()).
*/
ASSERT(!waitqueue_active(&state->wq));
free_extent_state(state);
cond_resched_lock(&tree->lock);
}
/*
* Should still be empty even after a reschedule, no other task should
* be accessing the tree anymore.
*/
ASSERT(RB_EMPTY_ROOT(&tree->state));
spin_unlock(&tree->lock);
}
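
The rewrite above empties the tree with a single postorder walk instead of repeated rb_erase() calls, so no rebalancing work is spent on nodes that are all about to be freed. A hedged, generic sketch of the same idiom (struct and function names are illustrative):

        #include <linux/rbtree.h>
        #include <linux/slab.h>

        struct example_node {
                struct rb_node rb_node;
        };

        static void example_free_all(struct rb_root *tree)
        {
                struct example_node *entry, *tmp;
                struct rb_root root = *tree;    /* steal the whole tree... */

                *tree = RB_ROOT;                /* ...and leave an empty one behind */

                /* Postorder visits children before their parent, so freeing in
                 * this order never touches freed memory and needs no rb_erase(). */
                rbtree_postorder_for_each_entry_safe(entry, tmp, &root, rb_node) {
                        RB_CLEAR_NODE(&entry->rb_node);
                        kfree(entry);
                }
        }

The btrfs version keeps tree->lock held across the walk and uses cond_resched_lock() so long walks stay preemptible.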
@ -327,6 +335,36 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
"locking error: extent tree was modified by another thread while locked");
}
static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *state)
{
struct extent_state *prev;
prev = prev_state(state);
if (prev && prev->end == state->start - 1 && prev->state == state->state) {
if (tree->inode)
btrfs_merge_delalloc_extent(tree->inode, state, prev);
state->start = prev->start;
rb_erase(&prev->rb_node, &tree->state);
RB_CLEAR_NODE(&prev->rb_node);
free_extent_state(prev);
}
}
static void merge_next_state(struct extent_io_tree *tree, struct extent_state *state)
{
struct extent_state *next;
next = next_state(state);
if (next && next->start == state->end + 1 && next->state == state->state) {
if (tree->inode)
btrfs_merge_delalloc_extent(tree->inode, state, next);
state->end = next->end;
rb_erase(&next->rb_node, &tree->state);
RB_CLEAR_NODE(&next->rb_node);
free_extent_state(next);
}
}
/*
* Utility function to look for merge candidates inside a given range. Any
* extents with matching state are merged together into a single extent in the
@ -338,31 +376,11 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
*/
static void merge_state(struct extent_io_tree *tree, struct extent_state *state)
{
struct extent_state *other;
if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
return;
other = prev_state(state);
if (other && other->end == state->start - 1 &&
other->state == state->state) {
if (tree->inode)
btrfs_merge_delalloc_extent(tree->inode, state, other);
state->start = other->start;
rb_erase(&other->rb_node, &tree->state);
RB_CLEAR_NODE(&other->rb_node);
free_extent_state(other);
}
other = next_state(state);
if (other && other->start == state->end + 1 &&
other->state == state->state) {
if (tree->inode)
btrfs_merge_delalloc_extent(tree->inode, state, other);
state->end = other->end;
rb_erase(&other->rb_node, &tree->state);
RB_CLEAR_NODE(&other->rb_node);
free_extent_state(other);
}
merge_prev_state(tree, state);
merge_next_state(tree, state);
}
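
merge_state() is now a thin wrapper over the two helpers above. For intuition: two records merge only when they are byte-adjacent and carry identical bits, so e.g. [0, 4095] and [4096, 8191] with equal state collapse into [0, 8191]. A toy illustration in plain C, heavily simplified from the kernel structures:

        #include <stdbool.h>
        #include <stdint.h>

        struct toy_state {
                uint64_t start;
                uint64_t end;   /* inclusive end offset */
                uint32_t bits;
        };

        /* Widen 'cur' over 'prev' when the two ranges touch and match exactly.
         * Returns true when merged; the caller then unlinks and frees 'prev'. */
        static bool toy_merge_prev(struct toy_state *cur,
                                   const struct toy_state *prev)
        {
                if (prev->end == cur->start - 1 && prev->bits == cur->bits) {
                        cur->start = prev->start;
                        return true;
                }
                return false;
        }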
static void set_state_bits(struct extent_io_tree *tree,
@ -384,19 +402,27 @@ static void set_state_bits(struct extent_io_tree *tree,
* Insert an extent_state struct into the tree. 'bits' are set on the
* struct before it is inserted.
*
* This may return -EEXIST if the extent is already there, in which case the
* state struct is freed.
* Returns a pointer to the struct extent_state record containing the range
* requested for insertion, which may be the same as the given struct or it
* may be an existing record in the tree that was expanded to accommodate the
* requested range. In case of an extent_state different from the one that was
* given, the latter can be freed or reused by the caller.
*
* On error it returns an error pointer.
*
* The tree lock is not taken internally. This is a utility function and
* probably isn't what you want to call (see set/clear_extent_bit).
*/
static int insert_state(struct extent_io_tree *tree,
struct extent_state *state,
u32 bits, struct extent_changeset *changeset)
static struct extent_state *insert_state(struct extent_io_tree *tree,
struct extent_state *state,
u32 bits,
struct extent_changeset *changeset)
{
struct rb_node **node;
struct rb_node *parent = NULL;
const u64 end = state->end;
const u64 start = state->start - 1;
const u64 end = state->end + 1;
const bool try_merge = !(bits & (EXTENT_LOCKED | EXTENT_BOUNDARY));
set_state_bits(tree, state, bits, changeset);
@ -407,23 +433,42 @@ static int insert_state(struct extent_io_tree *tree,
parent = *node;
entry = rb_entry(parent, struct extent_state, rb_node);
if (end < entry->start) {
if (state->end < entry->start) {
if (try_merge && end == entry->start &&
state->state == entry->state) {
if (tree->inode)
btrfs_merge_delalloc_extent(tree->inode,
state, entry);
entry->start = state->start;
merge_prev_state(tree, entry);
state->state = 0;
return entry;
}
node = &(*node)->rb_left;
} else if (end > entry->end) {
} else if (state->end > entry->end) {
if (try_merge && entry->end == start &&
state->state == entry->state) {
if (tree->inode)
btrfs_merge_delalloc_extent(tree->inode,
state, entry);
entry->end = state->end;
merge_next_state(tree, entry);
state->state = 0;
return entry;
}
node = &(*node)->rb_right;
} else {
btrfs_err(tree->fs_info,
"found node %llu %llu on insert of %llu %llu",
entry->start, entry->end, state->start, end);
return -EEXIST;
entry->start, entry->end, state->start, state->end);
return ERR_PTR(-EEXIST);
}
}
rb_link_node(&state->rb_node, parent, node);
rb_insert_color(&state->rb_node, &tree->state);
merge_state(tree, state);
return 0;
return state;
}
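
Callers of insert_state() now get back the record that actually holds the range, which may be a pre-existing neighbour widened by the in-walk merge rather than the preallocated struct they passed in. A hedged sketch of the updated caller pattern, mirroring the set_extent_bit hunks further down:

        struct extent_state *inserted_state;

        inserted_state = insert_state(tree, prealloc, bits, changeset);
        if (IS_ERR(inserted_state)) {
                /* -EEXIST: the tree changed underneath us, fatal here */
                extent_io_tree_panic(tree, PTR_ERR(inserted_state));
        }
        cache_state(inserted_state, cached_state);
        if (inserted_state == prealloc)
                prealloc = NULL;        /* ownership moved into the tree */
        /* otherwise the merge zeroed prealloc->state and it stays ours to reuse */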
/*
@ -708,26 +753,13 @@ out:
}
static void wait_on_state(struct extent_io_tree *tree,
struct extent_state *state)
__releases(tree->lock)
__acquires(tree->lock)
{
DEFINE_WAIT(wait);
prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock(&tree->lock);
schedule();
spin_lock(&tree->lock);
finish_wait(&state->wq, &wait);
}
/*
* Wait for one or more bits to clear on a range in the state tree.
* The range [start, end] is inclusive.
* The tree lock is taken by this function
*/
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
struct extent_state **cached_state)
static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, struct extent_state **cached_state)
{
struct extent_state *state;
@ -758,9 +790,15 @@ process_node:
goto out;
if (state->state & bits) {
DEFINE_WAIT(wait);
start = state->start;
refcount_inc(&state->refs);
wait_on_state(tree, state);
prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock(&tree->lock);
schedule();
spin_lock(&tree->lock);
finish_wait(&state->wq, &wait);
free_extent_state(state);
goto again;
}
@ -847,10 +885,19 @@ bool find_first_extent_bit(struct extent_io_tree *tree, u64 start,
if (state->end == start - 1 && extent_state_in_tree(state)) {
while ((state = next_state(state)) != NULL) {
if (state->state & bits)
goto got_it;
break;
}
/*
* If we found the next extent state, clear cached_state
* so that we can cache the next extent state below and
* avoid future calls going over the same extent state
* again. If we haven't found any, clear it as well since
* it's now useless.
*/
free_extent_state(*cached_state);
*cached_state = NULL;
if (state)
goto got_it;
goto out;
}
free_extent_state(*cached_state);
@ -1133,6 +1180,8 @@ hit_next:
*/
if (state->start > start) {
u64 this_end;
struct extent_state *inserted_state;
if (end < last_start)
this_end = end;
else
@ -1148,12 +1197,15 @@ hit_next:
*/
prealloc->start = start;
prealloc->end = this_end;
err = insert_state(tree, prealloc, bits, changeset);
if (err)
inserted_state = insert_state(tree, prealloc, bits, changeset);
if (IS_ERR(inserted_state)) {
err = PTR_ERR(inserted_state);
extent_io_tree_panic(tree, err);
}
cache_state(prealloc, cached_state);
prealloc = NULL;
cache_state(inserted_state, cached_state);
if (inserted_state == prealloc)
prealloc = NULL;
start = this_end + 1;
goto search_again;
}
@ -1356,6 +1408,8 @@ hit_next:
*/
if (state->start > start) {
u64 this_end;
struct extent_state *inserted_state;
if (end < last_start)
this_end = end;
else
@ -1373,11 +1427,14 @@ hit_next:
*/
prealloc->start = start;
prealloc->end = this_end;
err = insert_state(tree, prealloc, bits, NULL);
if (err)
inserted_state = insert_state(tree, prealloc, bits, NULL);
if (IS_ERR(inserted_state)) {
err = PTR_ERR(inserted_state);
extent_io_tree_panic(tree, err);
cache_state(prealloc, cached_state);
prealloc = NULL;
}
cache_state(inserted_state, cached_state);
if (inserted_state == prealloc)
prealloc = NULL;
start = this_end + 1;
goto search_again;
}
@ -1640,15 +1697,46 @@ search:
}
/*
* Search a range in the state tree for a given mask. If 'filled' == 1, this
* returns 1 only if every extent in the tree has the bits set. Otherwise, 1
* is returned if any bit in the range is found set.
* Check if the single @bit exists in the given range.
*/
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, int filled, struct extent_state *cached)
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit)
{
struct extent_state *state = NULL;
int bitset = 0;
bool bitset = false;
ASSERT(is_power_of_2(bit));
spin_lock(&tree->lock);
state = tree_search(tree, start);
while (state && start <= end) {
if (state->start > end)
break;
if (state->state & bit) {
bitset = true;
break;
}
/* If state->end is (u64)-1, start will overflow to 0 */
start = state->end + 1;
if (start > end || start == 0)
break;
state = next_state(state);
}
spin_unlock(&tree->lock);
return bitset;
}
/*
* Check if the whole range [@start, @end] contains the single @bit set.
*/
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
struct extent_state *cached)
{
struct extent_state *state = NULL;
bool bitset = true;
ASSERT(is_power_of_2(bit));
spin_lock(&tree->lock);
if (cached && extent_state_in_tree(cached) && cached->start <= start &&
@ -1657,35 +1745,35 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
else
state = tree_search(tree, start);
while (state && start <= end) {
if (filled && state->start > start) {
bitset = 0;
if (state->start > start) {
bitset = false;
break;
}
if (state->start > end)
break;
if (state->state & bits) {
bitset = 1;
if (!filled)
break;
} else if (filled) {
bitset = 0;
if ((state->state & bit) == 0) {
bitset = false;
break;
}
if (state->end == (u64)-1)
break;
/*
* Last entry (if state->end is (u64)-1 and overflow happens),
* or next entry starts after the range.
*/
start = state->end + 1;
if (start > end)
if (start > end || start == 0)
break;
state = next_state(state);
}
/* We ran out of states and were still inside of our range. */
if (filled && !state)
bitset = 0;
if (!state)
bitset = false;
spin_unlock(&tree->lock);
return bitset;
}
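
The old int 'filled' flag is split into two single-purpose helpers, and both now take one bit instead of a mask (hence the is_power_of_2() asserts). A hedged before/after of typical call sites:

        /* "is the bit set anywhere in [start, end]?"
         * was: test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL) */
        bool any = test_range_bit_exists(tree, start, end, EXTENT_LOCKED);

        /* "is the bit set across the whole of [start, end]?"
         * was: test_range_bit(tree, start, end, EXTENT_DELALLOC, 1, cached) */
        bool all = test_range_bit(tree, start, end, EXTENT_DELALLOC, cached);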


@ -131,8 +131,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
struct extent_state **cached_state);
void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, int filled, struct extent_state *cached_state);
bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit,
struct extent_state *cached_state);
bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
u32 bits, struct extent_changeset *changeset);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@ -192,7 +193,5 @@ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
u64 *end, u64 max_bytes,
struct extent_state **cached_state);
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits,
struct extent_state **cached_state);
#endif /* BTRFS_EXTENT_IO_TREE_H */

(File diff suppressed because it is too large.)


@ -7,6 +7,7 @@
#include "block-group.h"
struct btrfs_free_cluster;
struct btrfs_delayed_ref_head;
enum btrfs_extent_allocation_policy {
BTRFS_EXTENT_ALLOC_CLUSTERED,
@ -91,8 +92,8 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
enum btrfs_inline_ref_type is_data);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, unsigned long count);
void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes);
u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_delayed_ref_head *head);
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
@ -102,7 +103,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
int btrfs_pin_extent(struct btrfs_trans_handle *trans, u64 bytenr, u64 num,
int reserved);
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes);
const struct extent_buffer *eb);
int btrfs_exclude_logged_extents(struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_root *root,
u64 objectid, u64 offset, u64 bytenr, bool strict,
@ -113,6 +114,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
const struct btrfs_disk_key *key,
int level, u64 hint,
u64 empty_size,
u64 reloc_src_root,
enum btrfs_lock_nesting nest);
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
u64 root_id,
@ -136,12 +138,15 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, u64 flags);
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref);
u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf, int slot);
int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len, int delalloc);
int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start, u64 len);
int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans,
const struct extent_buffer *eb);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, struct btrfs_ref *generic_ref);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref,
int for_reloc);
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,


@ -21,7 +21,6 @@
#include "ctree.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "check-integrity.h"
#include "locking.h"
#include "rcu-string.h"
#include "backref.h"
@ -395,7 +394,7 @@ again:
/* then test to make sure it is all still delalloc */
ret = test_range_bit(tree, delalloc_start, delalloc_end,
EXTENT_DELALLOC, 1, cached_state);
EXTENT_DELALLOC, cached_state);
if (!ret) {
unlock_extent(tree, delalloc_start, delalloc_end,
&cached_state);
@ -2294,7 +2293,7 @@ static int try_release_extent_state(struct extent_io_tree *tree,
u64 end = start + PAGE_SIZE - 1;
int ret = 1;
if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
ret = 0;
} else {
u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
@ -2353,9 +2352,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
free_extent_map(em);
break;
}
if (test_range_bit(tree, em->start,
extent_map_end(em) - 1,
EXTENT_LOCKED, 0, NULL))
if (test_range_bit_exists(tree, em->start,
extent_map_end(em) - 1,
EXTENT_LOCKED))
goto next;
/*
* If it's not in the list of modified extents, used
@ -3455,6 +3454,12 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
start, fs_info->nodesize);
return -EINVAL;
}
if (!IS_ALIGNED(start, fs_info->nodesize) &&
!test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
btrfs_warn(fs_info,
"tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
start, fs_info->nodesize);
}
return 0;
}
@ -4248,14 +4253,14 @@ void copy_extent_buffer(const struct extent_buffer *dst,
}
/*
* eb_bitmap_offset() - calculate the page and offset of the byte containing the
* given bit number
* @eb: the extent buffer
* @start: offset of the bitmap item in the extent buffer
* @nr: bit number
* @page_index: return index of the page in the extent buffer that contains the
* given bit number
* @page_offset: return offset into the page given by page_index
* Calculate the page and offset of the byte containing the given bit number.
*
* @eb: the extent buffer
* @start: offset of the bitmap item in the extent buffer
* @nr: bit number
* @page_index: return index of the page in the extent buffer that contains
* the given bit number
* @page_offset: return offset into the page given by page_index
*
* This helper hides the ugliness of finding the byte in an extent buffer which
* contains a given bit.
@ -4614,7 +4619,8 @@ int try_release_extent_buffer(struct page *page)
}
/*
* btrfs_readahead_tree_block - attempt to readahead a child block
* Attempt to readahead a child block.
*
* @fs_info: the fs_info
* @bytenr: bytenr to read
* @owner_root: objectid of the root that owns this eb
@ -4653,7 +4659,8 @@ void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
}
/*
* btrfs_readahead_node_child - readahead a node's child block
* Readahead a node's child block.
*
* @node: parent node we're reading from
* @slot: slot in the parent node for the child we want to read
*


@ -80,16 +80,16 @@ struct extent_buffer {
spinlock_t refs_lock;
atomic_t refs;
int read_mirror;
struct rcu_head rcu_head;
pid_t lock_owner;
/* >= 0 if eb belongs to a log tree, -1 otherwise */
s8 log_index;
struct rcu_head rcu_head;
struct rw_semaphore lock;
struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
#ifdef CONFIG_BTRFS_DEBUG
struct list_head leak_list;
pid_t lock_owner;
#endif
};


@ -194,7 +194,7 @@ int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_encryption(leaf, item, 0);
btrfs_set_file_extent_other_encoding(leaf, item, 0);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@ -811,11 +811,12 @@ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio)
* This calls btrfs_truncate_item with the correct args based on the overlap,
* and fixes up the key as required.
*/
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_key *key,
u64 bytenr, u64 len)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct extent_buffer *leaf;
const u32 csum_size = fs_info->csum_size;
u64 csum_end;
@ -836,7 +837,7 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
btrfs_truncate_item(path, new_size, 1);
btrfs_truncate_item(trans, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
@ -848,10 +849,10 @@ static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
btrfs_truncate_item(path, new_size, 0);
btrfs_truncate_item(trans, path, new_size, 0);
key->offset = end_byte;
btrfs_set_item_key_safe(fs_info, path, key);
btrfs_set_item_key_safe(trans, path, key);
} else {
BUG();
}
@ -994,7 +995,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
key.offset = end_byte - 1;
} else {
truncate_one_csum(fs_info, path, &key, bytenr, len);
truncate_one_csum(trans, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
@ -1202,7 +1203,7 @@ extend_csum:
diff /= csum_size;
diff *= csum_size;
btrfs_extend_item(path, diff);
btrfs_extend_item(trans, path, diff);
ret = 0;
goto csum;
}
@ -1249,7 +1250,7 @@ found:
ins_size /= csum_size;
total_bytes += ins_size * fs_info->sectorsize;
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(path);
cond_resched();


@ -17,6 +17,7 @@
#include <linux/uio.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include <linux/iomap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
@ -368,12 +369,13 @@ next_slot:
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->start);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0) {
btrfs_init_generic_ref(&ref,
BTRFS_ADD_DELAYED_REF,
disk_bytenr, num_bytes, 0);
disk_bytenr, num_bytes, 0,
root->root_key.objectid);
btrfs_init_data_ref(&ref,
root->root_key.objectid,
new_key.objectid,
@ -405,13 +407,13 @@ next_slot:
memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = args->end;
btrfs_set_item_key_safe(fs_info, path, &new_key);
btrfs_set_item_key_safe(trans, path, &new_key);
extent_offset += args->end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - args->end);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += args->end - key.offset;
break;
@ -431,7 +433,7 @@ next_slot:
btrfs_set_file_extent_num_bytes(leaf, fi,
args->start - key.offset);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (update_refs && disk_bytenr > 0)
args->bytes_found += extent_end - args->start;
if (args->end == extent_end)
@ -463,7 +465,8 @@ delete_extent_item:
} else if (update_refs && disk_bytenr > 0) {
btrfs_init_generic_ref(&ref,
BTRFS_DROP_DELAYED_REF,
disk_bytenr, num_bytes, 0);
disk_bytenr, num_bytes, 0,
root->root_key.objectid);
btrfs_init_data_ref(&ref,
root->root_key.objectid,
key.objectid,
@ -536,7 +539,8 @@ delete_extent_item:
if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
path->slots[0]++;
}
btrfs_setup_item_for_insert(root, path, &key, args->extent_item_size);
btrfs_setup_item_for_insert(trans, root, path, &key,
args->extent_item_size);
args->extent_inserted = true;
}
@ -593,7 +597,6 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *root = inode->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
@ -664,7 +667,7 @@ again:
ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
btrfs_set_item_key_safe(fs_info, path, &new_key);
btrfs_set_item_key_safe(trans, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi,
@ -679,7 +682,7 @@ again:
trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
end - other_start);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
}
@ -698,7 +701,7 @@ again:
trans->transid);
path->slots[0]++;
new_key.offset = start;
btrfs_set_item_key_safe(fs_info, path, &new_key);
btrfs_set_item_key_safe(trans, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@ -708,7 +711,7 @@ again:
other_end - start);
btrfs_set_file_extent_offset(leaf, fi,
start - orig_offset);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
}
@ -742,10 +745,10 @@ again:
btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - split);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
num_bytes, 0);
num_bytes, 0, root->root_key.objectid);
btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
orig_offset, 0, false);
ret = btrfs_inc_extent_ref(trans, &ref);
@ -771,7 +774,7 @@ again:
other_start = end;
other_end = 0;
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
num_bytes, 0);
num_bytes, 0, root->root_key.objectid);
btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
0, false);
if (extent_mergeable(leaf, path->slots[0] + 1,
@ -814,7 +817,7 @@ again:
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
} else {
fi = btrfs_item_ptr(leaf, del_slot - 1,
struct btrfs_file_extent_item);
@ -823,7 +826,7 @@ again:
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_end - key.offset);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret < 0) {
@ -1747,7 +1750,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
struct btrfs_inode *inode = BTRFS_I(ctx->inode);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
if (btrfs_inode_in_log(inode, fs_info->generation) &&
if (btrfs_inode_in_log(inode, btrfs_get_fs_generation(fs_info)) &&
list_empty(&ctx->ordered_extents))
return true;
@ -1758,7 +1761,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
* and for a fast fsync we don't wait for that, we only wait for the
* writeback to complete.
*/
if (inode->last_trans <= fs_info->last_trans_committed &&
if (inode->last_trans <= btrfs_get_last_trans_committed(fs_info) &&
(test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
list_empty(&ctx->ordered_extents)))
return true;
@ -1887,7 +1890,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
atomic_inc(&root->log_batch);
smp_mb();
if (skip_inode_logging(&ctx)) {
/*
* We've had everything committed since the last time we were
@ -2105,7 +2107,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
@ -2113,7 +2115,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
u64 num_bytes;
key.offset = offset;
btrfs_set_item_key_safe(fs_info, path, &key);
btrfs_set_item_key_safe(trans, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@ -2122,7 +2124,7 @@ static int fill_holes(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
btrfs_set_file_extent_offset(leaf, fi, 0);
btrfs_set_file_extent_generation(leaf, fi, trans->transid);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
goto out;
}
btrfs_release_path(path);
@ -2274,7 +2276,7 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
if (extent_info->is_new_extent)
btrfs_set_file_extent_generation(leaf, extent, trans->transid);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
@ -2304,7 +2306,8 @@ static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
extent_info->disk_offset,
extent_info->disk_len, 0);
extent_info->disk_len, 0,
root->root_key.objectid);
ref_offset = extent_info->file_offset - extent_info->data_offset;
btrfs_init_data_ref(&ref, root->root_key.objectid,
btrfs_ino(inode), ref_offset, 0, false);
@ -2477,7 +2480,7 @@ int btrfs_replace_file_extents(struct btrfs_inode *inode,
inode_set_mtime_to_ts(&inode->vfs_inode,
inode_set_ctime_current(&inode->vfs_inode));
ret = btrfs_update_inode(trans, root, inode);
ret = btrfs_update_inode(trans, inode);
if (ret)
break;
@ -2717,7 +2720,7 @@ static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
ASSERT(trans != NULL);
inode_inc_iversion(inode);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
updated_inode = true;
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
@ -2743,7 +2746,7 @@ out_only_mutex:
} else {
int ret2;
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
ret2 = btrfs_end_transaction(trans);
if (!ret)
ret = ret2;
@ -2810,7 +2813,7 @@ static int btrfs_fallocate_update_isize(struct inode *inode,
inode_set_ctime_current(inode);
i_size_write(inode, end);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
ret2 = btrfs_end_transaction(trans);
return ret ? ret : ret2;


@ -57,6 +57,11 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, u64 offset,
u64 bytes, bool update_stats);
static void btrfs_crc32c_final(u32 crc, u8 *result)
{
put_unaligned_le32(~crc, result);
}
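
The local helper above replaces the one that used to live in the removed check-integrity code: a crc32c is finalized by inverting it and storing it little-endian. A hedged stand-alone illustration (the seed and buffer handling vary per caller; io_ctl_set_crc() seeds with ~0):

        #include <linux/crc32c.h>
        #include <asm/unaligned.h>

        static void example_csum(const void *buf, size_t len, u8 out[4])
        {
                u32 crc = crc32c(~0U, buf, len);

                put_unaligned_le32(~crc, out);  /* final inversion + LE byte order */
        }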
static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
struct btrfs_free_space *info;
@ -195,7 +200,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
btrfs_set_inode_nlink(leaf, inode_item, 1);
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
btrfs_set_inode_block_group(leaf, inode_item, offset);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@ -213,7 +218,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header);
memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
return 0;
@ -354,7 +359,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
if (ret)
goto fail;
ret = btrfs_update_inode(trans, root, inode);
ret = btrfs_update_inode(trans, inode);
fail:
if (locked)
@ -540,7 +545,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
if (index == 0)
offset = sizeof(u32) * io_ctl->num_pages;
crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
btrfs_crc32c_final(crc, (u8 *)&crc);
io_ctl_unmap_page(io_ctl);
tmp = page_address(io_ctl->pages[0]);
@ -562,7 +567,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
val = *tmp;
io_ctl_map_page(io_ctl, 0);
crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
crc = crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
btrfs_crc32c_final(crc, (u8 *)&crc);
if (val != crc) {
btrfs_err_rl(io_ctl->fs_info,
@ -1185,7 +1190,7 @@ update_cache_item(struct btrfs_trans_handle *trans,
btrfs_set_free_space_entries(leaf, header, entries);
btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
btrfs_set_free_space_generation(leaf, header, trans->transid);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
return 0;
@ -1321,7 +1326,7 @@ out:
"failed to write free space cache for block group %llu error %d",
block_group->start, ret);
}
btrfs_update_inode(trans, root, BTRFS_I(inode));
btrfs_update_inode(trans, BTRFS_I(inode));
if (block_group) {
/* the dirty list is protected by the dirty_bgs_lock */
@ -1362,7 +1367,6 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
/*
* Write out cached info to an inode.
*
* @root: root the inode belongs to
* @inode: freespace inode we are writing out
* @ctl: free space cache we are going to write out
* @block_group: block_group for this cache if it belongs to a block_group
@ -1373,7 +1377,7 @@ int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
* on mount. This will return 0 if it was successful in writing the cache out,
* or an errno if it was not.
*/
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
static int __btrfs_write_out_cache(struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_block_group *block_group,
struct btrfs_io_ctl *io_ctl,
@ -1506,7 +1510,7 @@ out:
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
}
btrfs_update_inode(trans, root, BTRFS_I(inode));
btrfs_update_inode(trans, BTRFS_I(inode));
if (must_iput)
iput(inode);
return ret;
@ -1532,8 +1536,8 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
if (IS_ERR(inode))
return 0;
ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
block_group, &block_group->io_ctl, trans);
ret = __btrfs_write_out_cache(inode, ctl, block_group,
&block_group->io_ctl, trans);
if (ret) {
btrfs_debug(fs_info,
"failed to write free space cache for block group %llu error %d",


@ -89,7 +89,7 @@ static int add_new_free_space_info(struct btrfs_trans_handle *trans,
struct btrfs_free_space_info);
btrfs_set_free_space_extent_count(leaf, info, 0);
btrfs_set_free_space_flags(leaf, info, 0);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
out:
@ -287,7 +287,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
if (extent_count != expected_extent_count) {
@ -324,7 +324,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, bitmap_cursor, ptr,
data_size);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
i += extent_size;
@ -430,7 +430,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
btrfs_set_free_space_flags(leaf, info, flags);
expected_extent_count = btrfs_free_space_extent_count(leaf, info);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
nrbits = block_group->length >> block_group->fs_info->sectorsize_bits;
@ -495,7 +495,7 @@ static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
extent_count += new_extents;
btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
@ -533,7 +533,8 @@ int free_space_test_bit(struct btrfs_block_group *block_group,
return !!extent_buffer_test_bit(leaf, ptr, i);
}
static void free_space_set_bits(struct btrfs_block_group *block_group,
static void free_space_set_bits(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path, u64 *start, u64 *size,
int bit)
{
@ -563,7 +564,7 @@ static void free_space_set_bits(struct btrfs_block_group *block_group,
extent_buffer_bitmap_set(leaf, ptr, first, last - first);
else
extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
*size -= end - *start;
*start = end;
@ -656,7 +657,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
cur_start = start;
cur_size = size;
while (1) {
free_space_set_bits(block_group, path, &cur_start, &cur_size,
free_space_set_bits(trans, block_group, path, &cur_start, &cur_size,
!remove);
if (cur_size == 0)
break;


@ -139,6 +139,12 @@ enum {
*/
BTRFS_FS_FEATURE_CHANGED,
/*
* Indicate that we have found a tree block which is only aligned to
* sectorsize, but not to nodesize. This should be rare nowadays.
*/
BTRFS_FS_UNALIGNED_TREE_BLOCK,
#if BITS_PER_LONG == 32
/* Indicate if we have error/warn message printed on 32bit systems */
BTRFS_FS_32BIT_ERROR,
@ -171,19 +177,17 @@ enum {
BTRFS_MOUNT_AUTO_DEFRAG = (1UL << 16),
BTRFS_MOUNT_USEBACKUPROOT = (1UL << 17),
BTRFS_MOUNT_SKIP_BALANCE = (1UL << 18),
BTRFS_MOUNT_CHECK_INTEGRITY = (1UL << 19),
BTRFS_MOUNT_CHECK_INTEGRITY_DATA = (1UL << 20),
BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 21),
BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 22),
BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 23),
BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 24),
BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 25),
BTRFS_MOUNT_NOLOGREPLAY = (1UL << 26),
BTRFS_MOUNT_REF_VERIFY = (1UL << 27),
BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 28),
BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 29),
BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 30),
BTRFS_MOUNT_NODISCARD = (1UL << 31),
BTRFS_MOUNT_PANIC_ON_FATAL_ERROR = (1UL << 19),
BTRFS_MOUNT_RESCAN_UUID_TREE = (1UL << 20),
BTRFS_MOUNT_FRAGMENT_DATA = (1UL << 21),
BTRFS_MOUNT_FRAGMENT_METADATA = (1UL << 22),
BTRFS_MOUNT_FREE_SPACE_TREE = (1UL << 23),
BTRFS_MOUNT_NOLOGREPLAY = (1UL << 24),
BTRFS_MOUNT_REF_VERIFY = (1UL << 25),
BTRFS_MOUNT_DISCARD_ASYNC = (1UL << 26),
BTRFS_MOUNT_IGNOREBADROOTS = (1UL << 27),
BTRFS_MOUNT_IGNOREDATACSUMS = (1UL << 28),
BTRFS_MOUNT_NODISCARD = (1UL << 29),
};
/*
@ -216,7 +220,8 @@ enum {
BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
BTRFS_FEATURE_INCOMPAT_ZONED)
BTRFS_FEATURE_INCOMPAT_ZONED | \
BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA)
#ifdef CONFIG_BTRFS_DEBUG
/*
@ -225,6 +230,7 @@ enum {
*/
#define BTRFS_FEATURE_INCOMPAT_SUPP \
(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE | \
BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)
#else
@ -369,6 +375,7 @@ struct btrfs_fs_info {
struct btrfs_root *uuid_root;
struct btrfs_root *data_reloc_root;
struct btrfs_root *block_group_root;
struct btrfs_root *stripe_root;
/* The log root tree is a directory of all the other log roots */
struct btrfs_root *log_root_tree;
@ -409,7 +416,17 @@ struct btrfs_fs_info {
struct btrfs_block_rsv empty_block_rsv;
/*
* Updated while holding the lock 'trans_lock'. Due to the life cycle of
* a transaction, it can be directly read while holding a transaction
* handle, everywhere else must be read with btrfs_get_fs_generation().
* Should always be updated using btrfs_set_fs_generation().
*/
u64 generation;
/*
* Always use btrfs_get_last_trans_committed() and
* btrfs_set_last_trans_committed() to read and update this field.
*/
u64 last_trans_committed;
/*
* Generation of the last transaction used for block group relocation
@ -645,9 +662,6 @@ struct btrfs_fs_info {
struct btrfs_discard_ctl discard_ctl;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
u32 check_integrity_print_mask;
#endif
/* Is qgroup tracking in a consistent state? */
u64 qgroup_flags;
@ -683,6 +697,7 @@ struct btrfs_fs_info {
/* Protected by qgroup_rescan_lock */
bool qgroup_rescan_running;
u8 qgroup_drop_subtree_thres;
u64 qgroup_enable_gen;
/*
* If this is not 0, then it indicates a serious filesystem error has
@ -812,6 +827,26 @@ struct btrfs_fs_info {
#endif
};
static inline u64 btrfs_get_fs_generation(const struct btrfs_fs_info *fs_info)
{
return READ_ONCE(fs_info->generation);
}
static inline void btrfs_set_fs_generation(struct btrfs_fs_info *fs_info, u64 gen)
{
WRITE_ONCE(fs_info->generation, gen);
}
static inline u64 btrfs_get_last_trans_committed(const struct btrfs_fs_info *fs_info)
{
return READ_ONCE(fs_info->last_trans_committed);
}
static inline void btrfs_set_last_trans_committed(struct btrfs_fs_info *fs_info, u64 gen)
{
WRITE_ONCE(fs_info->last_trans_committed, gen);
}
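
generation and last_trans_committed are now read through READ_ONCE()/WRITE_ONCE() accessors, documenting the lockless access and preventing the compiler from tearing or refetching the values. A hedged sketch of a converted reader (the function name is illustrative):

        static bool example_committed_since(struct btrfs_fs_info *fs_info,
                                            u64 last_trans)
        {
                /* Annotated lockless read; the writer updates the value under
                 * fs_info->trans_lock via btrfs_set_last_trans_committed(). */
                return last_trans <= btrfs_get_last_trans_committed(fs_info);
        }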
static inline void btrfs_set_last_root_drop_gen(struct btrfs_fs_info *fs_info,
u64 gen)
{


@ -167,7 +167,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
btrfs_truncate_item(path, item_size - del_len, 1);
btrfs_truncate_item(trans, path, item_size - del_len, 1);
out:
btrfs_free_path(path);
@ -229,7 +229,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
btrfs_truncate_item(path, item_size - sub_item_len, 1);
btrfs_truncate_item(trans, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
@ -247,7 +247,7 @@ out:
}
/*
* btrfs_insert_inode_extref() - Inserts an extended inode ref into a tree.
* Insert an extended inode ref into a tree.
*
* The caller must have checked against BTRFS_LINK_MAX already.
*/
@ -282,7 +282,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
name))
goto out;
btrfs_extend_item(path, ins_len);
btrfs_extend_item(trans, path, ins_len);
ret = 0;
}
if (ret < 0)
@ -299,7 +299,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)&extref->name;
write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
out:
btrfs_free_path(path);
@ -338,7 +338,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
goto out;
old_size = btrfs_item_size(path->nodes[0], path->slots[0]);
btrfs_extend_item(path, ins_len);
btrfs_extend_item(trans, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
@ -364,7 +364,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
ptr = (unsigned long)(ref + 1);
}
write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
out:
btrfs_free_path(path);
@ -591,7 +591,7 @@ search_again:
num_dec = (orig_num_bytes - extent_num_bytes);
if (extent_start != 0)
control->sub_bytes += num_dec;
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
} else {
extent_num_bytes =
btrfs_file_extent_disk_num_bytes(leaf, fi);
@ -617,7 +617,7 @@ search_again:
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
size = btrfs_file_extent_calc_inline_size(size);
btrfs_truncate_item(path, size, 1);
btrfs_truncate_item(trans, path, size, 1);
} else if (!del_item) {
/*
* We have to bail so the last_size is set to
@ -676,7 +676,8 @@ delete:
bytes_deleted += extent_num_bytes;
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
extent_start, extent_num_bytes, 0);
extent_start, extent_num_bytes, 0,
root->root_key.objectid);
btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
control->ino, extent_offset,
root->root_key.objectid, false);


@ -4,6 +4,7 @@
#define BTRFS_INODE_ITEM_H
#include <linux/types.h>
#include <linux/crc32c.h>
struct btrfs_trans_handle;
struct btrfs_root;
@ -12,6 +13,7 @@ struct btrfs_key;
struct btrfs_inode_extref;
struct btrfs_inode;
struct extent_buffer;
struct fscrypt_str;
/*
* Return this if we need to call truncate_block for the last bit of the
@ -76,6 +78,12 @@ static inline void btrfs_inode_split_flags(u64 inode_item_flags,
*ro_flags = (u32)(inode_item_flags >> 32);
}
/* Figure the key offset of an extended inode ref. */
static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name, int len)
{
return (u64)crc32c(parent_objectid, name, len);
}
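
btrfs_extref_hash() is just crc32c over the name, seeded with the parent directory's objectid; the result is used as the key offset of the extended inode ref item. A hedged usage fragment (dir_ino is illustrative):

        /* Key offset under which the extref item for "foo" in dir_ino lives. */
        u64 offset = btrfs_extref_hash(dir_ino, "foo", 3);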
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_truncate_control *control);


@ -71,6 +71,7 @@
#include "super.h"
#include "orphan.h"
#include "backref.h"
#include "raid-stripe-tree.h"
struct btrfs_iget_args {
u64 ino;
@ -348,7 +349,7 @@ static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
}
/*
* btrfs_inode_lock - lock inode i_rwsem based on arguments passed
* Lock inode i_rwsem based on arguments passed.
*
* ilock_flags can have the following bit set:
*
@ -382,7 +383,7 @@ int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
}
/*
* btrfs_inode_unlock - unlock inode i_rwsem
* Unlock inode i_rwsem.
*
* ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
* to decide whether the lock acquired is shared or exclusive.
@ -573,7 +574,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
kunmap_local(kaddr);
put_page(page);
}
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/*
@ -670,7 +671,7 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 size,
}
btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
ret = btrfs_update_inode(trans, root, inode);
ret = btrfs_update_inode(trans, inode);
if (ret && ret != -ENOSPC) {
btrfs_abort_transaction(trans, ret);
goto out;
@ -1565,8 +1566,11 @@ out_unlock:
* Phase two of compressed writeback. This is the ordered portion of the code,
* which only gets called in the order the work was queued. We walk all the
* async extents created by compress_file_range and send them down to the disk.
*
* If called with @do_free == true then it only releases the references held
* by the async chunk and eventually frees the containing async_cow struct.
*/
static noinline void submit_compressed_extents(struct btrfs_work *work)
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
struct async_chunk *async_chunk = container_of(work, struct async_chunk,
work);
@ -1575,6 +1579,21 @@ static noinline void submit_compressed_extents(struct btrfs_work *work)
unsigned long nr_pages;
u64 alloc_hint = 0;
if (do_free) {
struct async_chunk *async_chunk;
struct async_cow *async_cow;
async_chunk = container_of(work, struct async_chunk, work);
btrfs_add_delayed_iput(async_chunk->inode);
if (async_chunk->blkcg_css)
css_put(async_chunk->blkcg_css);
async_cow = async_chunk->async_cow;
if (atomic_dec_and_test(&async_cow->num_chunks))
kvfree(async_cow);
return;
}
nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
PAGE_SHIFT;
@ -1591,21 +1610,6 @@ static noinline void submit_compressed_extents(struct btrfs_work *work)
cond_wake_up_nomb(&fs_info->async_submit_wait);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
struct async_chunk *async_chunk;
struct async_cow *async_cow;
async_chunk = container_of(work, struct async_chunk, work);
btrfs_add_delayed_iput(async_chunk->inode);
if (async_chunk->blkcg_css)
css_put(async_chunk->blkcg_css);
async_cow = async_chunk->async_cow;
if (atomic_dec_and_test(&async_cow->num_chunks))
kvfree(async_cow);
}
static bool run_delalloc_compressed(struct btrfs_inode *inode,
struct page *locked_page, u64 start,
u64 end, struct writeback_control *wbc)
@ -1683,7 +1687,7 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode,
}
btrfs_init_work(&async_chunk[i].work, compress_file_range,
submit_compressed_extents, async_cow_free);
submit_compressed_extents);
nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
atomic_add(nr_pages, &fs_info->async_delalloc_pages);
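
btrfs_init_work() loses its separate free function: the ordered function is now invoked one more time with do_free == true when the work item is retired, as submit_compressed_extents() above demonstrates. A hedged sketch of the new shape (all example_* names are illustrative):

        struct example_job {
                struct btrfs_work work;
                /* ... */
        };

        static void example_worker(struct btrfs_work *work)
        {
                /* main (unordered) phase of the work runs here */
        }

        static void example_ordered(struct btrfs_work *work, bool do_free)
        {
                struct example_job *job =
                        container_of(work, struct example_job, work);

                if (do_free) {          /* final call: only tear the job down */
                        kfree(job);
                        return;
                }
                /* ordered phase of the work runs here */
        }

        /* was: btrfs_init_work(&job->work, example_worker, example_ordered,
         *                      example_free); */
        btrfs_init_work(&job->work, example_worker, example_ordered);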
@ -2235,8 +2239,7 @@ static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
if (inode->defrag_bytes &&
test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG,
0, NULL))
test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
return false;
return true;
}
@ -2847,7 +2850,7 @@ int btrfs_writepage_cow_fixup(struct page *page)
ihold(inode);
btrfs_page_set_checked(fs_info, page, page_offset(page), PAGE_SIZE);
get_page(page);
btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL);
btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
fixup->page = page;
fixup->inode = BTRFS_I(inode);
btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
@ -2912,7 +2915,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(struct btrfs_file_extent_item));
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
/*
@ -3070,7 +3073,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
trans->block_rsv = &inode->block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
ret = btrfs_update_inode_fallback(trans, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
@ -3091,6 +3094,10 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
trans->block_rsv = &inode->block_rsv;
ret = btrfs_insert_raid_extent(trans, ordered_extent);
if (ret)
goto out;
if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
compress_type = ordered_extent->compress_type;
if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
@ -3136,7 +3143,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
&cached_state);
btrfs_inode_safe_disk_i_size_write(inode, 0);
ret = btrfs_update_inode_fallback(trans, root, inode);
ret = btrfs_update_inode_fallback(trans, inode);
if (ret) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
@ -3224,7 +3231,8 @@ out:
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
!test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
!test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
list_empty(&ordered->bioc_list))
btrfs_finish_ordered_zoned(ordered);
return btrfs_finish_one_ordered(ordered);
}
@ -3282,7 +3290,7 @@ bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
if (btrfs_is_data_reloc_root(inode->root) &&
test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
1, NULL)) {
NULL)) {
/* Skip the range without csum for data reloc inode */
clear_extent_bits(&inode->io_tree, file_offset, end,
EXTENT_NODATASUM);
@ -3306,7 +3314,7 @@ zeroit:
}
/*
* btrfs_add_delayed_iput - perform a delayed iput on @inode
* Perform a delayed iput on @inode.
*
* @inode: The inode we want to perform iput on
*
@ -3763,10 +3771,8 @@ static int btrfs_read_locked_inode(struct inode *inode,
inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
btrfs_timespec_nsec(leaf, &inode_item->ctime));
BTRFS_I(inode)->i_otime.tv_sec =
btrfs_timespec_sec(leaf, &inode_item->otime);
BTRFS_I(inode)->i_otime.tv_nsec =
btrfs_timespec_nsec(leaf, &inode_item->otime);
BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);
inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
@ -3792,7 +3798,7 @@ cache_index:
* This is required for both inode re-read from disk and delayed inode
* in delayed_nodes_tree.
*/
if (BTRFS_I(inode)->last_trans == fs_info->generation)
if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
@ -3936,10 +3942,8 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_set_token_timespec_nsec(&token, &item->ctime,
inode_get_ctime_nsec(inode));
btrfs_set_token_timespec_sec(&token, &item->otime,
BTRFS_I(inode)->i_otime.tv_sec);
btrfs_set_token_timespec_nsec(&token, &item->otime,
BTRFS_I(inode)->i_otime.tv_nsec);
btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);
btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
btrfs_set_token_inode_generation(&token, item,
@ -3957,8 +3961,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
* copy everything in the in-memory inode into the btree.
*/
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_inode *inode)
struct btrfs_inode *inode)
{
struct btrfs_inode_item *inode_item;
struct btrfs_path *path;
@ -3969,7 +3972,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1);
ret = btrfs_lookup_inode(trans, inode->root, path, &inode->location, 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
@ -3981,7 +3984,7 @@ static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item);
fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_set_inode_last_trans(trans, inode);
ret = 0;
failed:
@ -3992,10 +3995,10 @@ failed:
/*
* copy everything in the in-memory inode into the btree.
*/
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_inode *inode)
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_inode *inode)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@ -4011,23 +4014,23 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
&& !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
btrfs_update_root_times(trans, root);
ret = btrfs_delayed_update_inode(trans, root, inode);
ret = btrfs_delayed_update_inode(trans, inode);
if (!ret)
btrfs_set_inode_last_trans(trans, inode);
return ret;
}
return btrfs_update_inode_item(trans, root, inode);
return btrfs_update_inode_item(trans, inode);
}
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_inode *inode)
struct btrfs_inode *inode)
{
int ret;
ret = btrfs_update_inode(trans, root, inode);
ret = btrfs_update_inode(trans, inode);
if (ret == -ENOSPC)
return btrfs_update_inode_item(trans, root, inode);
return btrfs_update_inode_item(trans, inode);
return ret;
}
@ -4133,7 +4136,7 @@ err:
inode_inc_iversion(&inode->vfs_inode);
inode_inc_iversion(&dir->vfs_inode);
inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
ret = btrfs_update_inode(trans, root, dir);
ret = btrfs_update_inode(trans, dir);
out:
return ret;
}
@ -4147,7 +4150,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
if (!ret) {
drop_nlink(&inode->vfs_inode);
ret = btrfs_update_inode(trans, inode->root, inode);
ret = btrfs_update_inode(trans, inode);
}
return ret;
}
@ -4306,7 +4309,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
inode_inc_iversion(&dir->vfs_inode);
inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
ret = btrfs_update_inode_fallback(trans, root, dir);
ret = btrfs_update_inode_fallback(trans, dir);
if (ret)
btrfs_abort_transaction(trans, ret);
out:
@ -4640,7 +4643,8 @@ out_notrans:
}
/*
* btrfs_truncate_block - read, zero a chunk and write a block
* Read, zero a chunk and write a block.
*
* @inode - inode that we're zeroing
* @from - the offset to start zeroing
* @len - the length to zero, 0 to zero the entire range respective to the
@ -4790,9 +4794,9 @@ out:
return ret;
}
static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
u64 offset, u64 len)
static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_trans_handle *trans;
struct btrfs_drop_extents_args drop_args = { 0 };
@ -4832,7 +4836,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct btrfs_inode *inode,
btrfs_abort_transaction(trans, ret);
} else {
btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
btrfs_update_inode(trans, root, inode);
btrfs_update_inode(trans, inode);
}
btrfs_end_transaction(trans);
return ret;
@ -4888,8 +4892,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
struct extent_map *hole_em;
err = maybe_insert_hole(root, inode, cur_offset,
hole_size);
err = maybe_insert_hole(inode, cur_offset, hole_size);
if (err)
break;
@ -4915,7 +4918,7 @@ int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
hole_em->orig_block_len = 0;
hole_em->ram_bytes = hole_size;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = fs_info->generation;
hole_em->generation = btrfs_get_fs_generation(fs_info);
err = btrfs_replace_extent_map_range(inode, hole_em, true);
free_extent_map(hole_em);
@ -4984,7 +4987,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
i_size_write(inode, newsize);
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
pagecache_isize_extended(inode, oldsize, newsize);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
btrfs_drew_write_unlock(&root->snapshot_lock);
btrfs_end_transaction(trans);
} else {
@ -5582,6 +5585,7 @@ static struct inode *new_simple_dir(struct inode *dir,
struct btrfs_key *key,
struct btrfs_root *root)
{
struct timespec64 ts;
struct inode *inode = new_inode(dir->i_sb);
if (!inode)
@ -5600,9 +5604,13 @@ static struct inode *new_simple_dir(struct inode *dir,
inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
ts = inode_set_ctime_current(inode);
inode_set_mtime_to_ts(inode, ts);
inode_set_atime_to_ts(inode, inode_get_atime(dir));
BTRFS_I(inode)->i_otime = inode_get_mtime(inode);
BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
inode->i_uid = dir->i_uid;
inode->i_gid = dir->i_gid;
@ -6000,15 +6008,15 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode)
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
if (ret && (ret == -ENOSPC || ret == -EDQUOT)) {
ret = btrfs_update_inode(trans, inode);
if (ret == -ENOSPC || ret == -EDQUOT) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
ret = btrfs_update_inode(trans, inode);
}
btrfs_end_transaction(trans);
if (inode->delayed_node)
@ -6024,7 +6032,7 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode)
static int btrfs_update_time(struct inode *inode, int flags)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
bool dirty = flags & ~S_VERSION;
bool dirty;
if (btrfs_root_readonly(root))
return -EROFS;
@ -6160,6 +6168,7 @@ static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
struct btrfs_new_inode_args *args)
{
struct timespec64 ts;
struct inode *dir = args->dir;
struct inode *inode = args->inode;
const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
@ -6277,8 +6286,9 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
goto discard;
}
simple_inode_init_ts(inode);
BTRFS_I(inode)->i_otime = inode_get_mtime(inode);
ts = simple_inode_init_ts(inode);
BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;
/*
* We're going to fill the inode item now, so at this point the inode
@ -6309,7 +6319,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
}
}
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
/*
* We don't need the path anymore, plus inheriting properties, adding
* ACLs, security xattrs, orphan item or adding the link, will result in
@ -6446,7 +6456,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
inode_set_mtime_to_ts(&parent_inode->vfs_inode,
inode_set_ctime_current(&parent_inode->vfs_inode));
ret = btrfs_update_inode(trans, root, parent_inode);
ret = btrfs_update_inode(trans, parent_inode);
if (ret)
btrfs_abort_transaction(trans, ret);
return ret;
@ -6597,7 +6607,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
} else {
struct dentry *parent = dentry->d_parent;
err = btrfs_update_inode(trans, root, BTRFS_I(inode));
err = btrfs_update_inode(trans, BTRFS_I(inode));
if (err)
goto fail;
if (inode->i_nlink == 1) {
@ -7102,8 +7112,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
range_end = round_up(offset + nocow_args.num_bytes,
root->fs_info->sectorsize) - 1;
ret = test_range_bit(io_tree, offset, range_end,
EXTENT_DELALLOC, 0, NULL);
ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
if (ret) {
ret = -EAGAIN;
goto out;
@ -8004,11 +8013,11 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
EXTENT_DEFRAG, &cached_state);
spin_lock_irq(&inode->ordered_tree.lock);
spin_lock_irq(&inode->ordered_tree_lock);
set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
ordered->truncated_len = min(ordered->truncated_len,
cur - ordered->file_offset);
spin_unlock_irq(&inode->ordered_tree.lock);
spin_unlock_irq(&inode->ordered_tree_lock);
/*
* If the ordered extent has finished, we're safe to delete all
@ -8338,7 +8347,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
if (ret != -ENOSPC && ret != -EAGAIN)
break;
ret = btrfs_update_inode(trans, root, inode);
ret = btrfs_update_inode(trans, inode);
if (ret)
break;
@ -8391,7 +8400,7 @@ static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
int ret2;
trans->block_rsv = &fs_info->trans_block_rsv;
ret2 = btrfs_update_inode(trans, root, inode);
ret2 = btrfs_update_inode(trans, inode);
if (ret2 && !ret)
ret = ret2;
@ -8480,8 +8489,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->delayed_node = NULL;
ei->i_otime.tv_sec = 0;
ei->i_otime.tv_nsec = 0;
ei->i_otime_sec = 0;
ei->i_otime_nsec = 0;
inode = &ei->vfs_inode;
extent_map_tree_init(&ei->extent_tree);
@ -8490,7 +8499,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
extent_io_tree_init(fs_info, &ei->file_extent_tree,
IO_TREE_INODE_FILE_EXTENT);
mutex_init(&ei->log_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
spin_lock_init(&ei->ordered_tree_lock);
ei->ordered_tree = RB_ROOT;
ei->ordered_tree_last = NULL;
INIT_LIST_HEAD(&ei->delalloc_inodes);
INIT_LIST_HEAD(&ei->delayed_iput);
RB_CLEAR_NODE(&ei->rb_node);
@ -8633,8 +8644,8 @@ static int btrfs_getattr(struct mnt_idmap *idmap,
u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;
stat->result_mask |= STATX_BTIME;
stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec;
stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec;
stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
if (bi_flags & BTRFS_INODE_APPEND)
stat->attributes |= STATX_ATTR_APPEND;
if (bi_flags & BTRFS_INODE_COMPRESS)
@ -8822,7 +8833,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
BTRFS_I(old_dentry->d_inode),
old_name, &old_rename_ctx);
if (!ret)
ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@ -8837,7 +8848,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
BTRFS_I(new_dentry->d_inode),
new_name, &new_rename_ctx);
if (!ret)
ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@ -9082,7 +9093,7 @@ static int btrfs_rename(struct mnt_idmap *idmap,
BTRFS_I(d_inode(old_dentry)),
&old_fname.disk_name, &rename_ctx);
if (!ret)
ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
}
if (ret) {
btrfs_abort_transaction(trans, ret);
@ -9207,7 +9218,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
init_completion(&work->completion);
INIT_LIST_HEAD(&work->list);
work->inode = inode;
btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL);
btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);
return work;
}
@ -9445,7 +9456,7 @@ static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
ptr = btrfs_file_extent_inline_start(ei);
write_extent_buffer(leaf, symname, ptr, name_len);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_free_path(path);
d_instantiate_new(dentry, inode);
@ -9638,7 +9649,7 @@ next:
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret) {
btrfs_abort_transaction(trans, ret);


@ -385,7 +385,7 @@ update_flags:
btrfs_sync_inode_flags_to_i_flags(inode);
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
out_end_trans:
btrfs_end_transaction(trans);
@ -652,18 +652,18 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
/* Tree log can't currently deal with an inode which is a new root. */
btrfs_set_log_full_commit(trans);
ret = btrfs_qgroup_inherit(trans, 0, objectid, inherit);
ret = btrfs_qgroup_inherit(trans, 0, objectid, root->root_key.objectid, inherit);
if (ret)
goto out;
leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
BTRFS_NESTING_NORMAL);
0, BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf);
goto out;
}
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
inode_item = &root_item->inode;
btrfs_set_stack_inode_generation(inode_item, 1);
@ -2635,6 +2635,12 @@ static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
return -EINVAL;
}
if (fs_info->fs_devices->temp_fsid) {
btrfs_err(fs_info,
"device add not supported on cloned temp-fsid mount");
return -EINVAL;
}
if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) {
if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD))
return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
@ -2820,7 +2826,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
}
if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) {
fi_args->generation = fs_info->generation;
fi_args->generation = btrfs_get_fs_generation(fs_info);
fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION;
}
@ -2945,7 +2951,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key);
btrfs_set_dir_item_key(path->nodes[0], di, &disk_key);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL);
@ -3129,7 +3135,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
return PTR_ERR(trans);
/* No running transaction, don't bother */
transid = root->fs_info->last_trans_committed;
transid = btrfs_get_last_trans_committed(root->fs_info);
goto out;
}
transid = trans->transid;
@ -3695,7 +3701,8 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
switch (sa->cmd) {
case BTRFS_QUOTA_CTL_ENABLE:
ret = btrfs_quota_enable(fs_info);
case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA:
ret = btrfs_quota_enable(fs_info, sa);
break;
case BTRFS_QUOTA_CTL_DISABLE:
ret = btrfs_quota_disable(fs_info);


@ -8,6 +8,7 @@
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <trace/events/btrfs.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
@ -73,6 +74,7 @@ static struct btrfs_lockdep_keyset {
{ .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") },
{ .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") },
{ .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") },
{ .id = BTRFS_RAID_STRIPE_TREE_OBJECTID, DEFINE_NAME("raid-stripe") },
{ .id = 0, DEFINE_NAME("tree") },
};
@ -102,6 +104,15 @@ void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buff
#endif
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner)
{
eb->lock_owner = owner;
}
#else
static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { }
#endif
/*
* Extent buffer locking
* =====================
@ -164,7 +175,7 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
if (down_write_trylock(&eb->lock)) {
eb->lock_owner = current->pid;
btrfs_set_eb_lock_owner(eb, current->pid);
trace_btrfs_try_tree_write_lock(eb);
return 1;
}
@ -181,7 +192,8 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
}
/*
* __btrfs_tree_lock - lock eb for write
* Lock eb for write.
*
* @eb: the eb to lock
* @nest: the nesting to use for the lock
*
@ -196,7 +208,7 @@ void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
start_ns = ktime_get_ns();
down_write_nested(&eb->lock, nest);
eb->lock_owner = current->pid;
btrfs_set_eb_lock_owner(eb, current->pid);
trace_btrfs_tree_lock(eb, start_ns);
}
@ -211,7 +223,7 @@ void btrfs_tree_lock(struct extent_buffer *eb)
void btrfs_tree_unlock(struct extent_buffer *eb)
{
trace_btrfs_tree_unlock(eb);
eb->lock_owner = 0;
btrfs_set_eb_lock_owner(eb, 0);
up_write(&eb->lock);
}


@ -72,11 +72,11 @@ static void btrfs_state_to_string(const struct btrfs_fs_info *info, char *buf)
* over the error. Each subsequent error that doesn't have any context
* of the original error should use EROFS when handling BTRFS_FS_STATE_ERROR.
*/
const char * __attribute_const__ btrfs_decode_error(int errno)
const char * __attribute_const__ btrfs_decode_error(int error)
{
char *errstr = "unknown";
switch (errno) {
switch (error) {
case -ENOENT: /* -2 */
errstr = "No such entry";
break;
@ -110,12 +110,12 @@ const char * __attribute_const__ btrfs_decode_error(int errno)
}
/*
* __btrfs_handle_fs_error decodes expected errors from the caller and
* invokes the appropriate error response.
* Decodes expected errors from the caller and invokes the appropriate error
* response.
*/
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...)
unsigned int line, int error, const char *fmt, ...)
{
struct super_block *sb = fs_info->sb;
#ifdef CONFIG_PRINTK
@ -132,11 +132,11 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
* Special case: if the error is EROFS, and we're already under
* SB_RDONLY, then it is safe here.
*/
if (errno == -EROFS && sb_rdonly(sb))
if (error == -EROFS && sb_rdonly(sb))
return;
#ifdef CONFIG_PRINTK
errstr = btrfs_decode_error(errno);
errstr = btrfs_decode_error(error);
btrfs_state_to_string(fs_info, statestr);
if (fmt) {
struct va_format vaf;
@ -147,11 +147,11 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
vaf.va = &args;
pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s (%pV)\n",
sb->s_id, statestr, function, line, errno, errstr, &vaf);
sb->s_id, statestr, function, line, error, errstr, &vaf);
va_end(args);
} else {
pr_crit("BTRFS: error (device %s%s) in %s:%d: errno=%d %s\n",
sb->s_id, statestr, function, line, errno, errstr);
sb->s_id, statestr, function, line, error, errstr);
}
#endif
@ -159,7 +159,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
* Today we only save the error info to memory. Long term we'll also
* send it down to the disk.
*/
WRITE_ONCE(fs_info->fs_error, errno);
WRITE_ONCE(fs_info->fs_error, error);
/* Don't go through full error handling during mount. */
if (!(sb->s_flags & SB_BORN))
@ -283,12 +283,12 @@ void __cold btrfs_err_32bit_limit(struct btrfs_fs_info *fs_info)
#endif
/*
* __btrfs_panic decodes unexpected, fatal errors from the caller, issues an
* alert, and either panics or BUGs, depending on mount options.
* Decode unexpected, fatal errors from the caller, issue an alert, and either
* panic or BUGs, depending on mount options.
*/
__cold
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...)
unsigned int line, int error, const char *fmt, ...)
{
char *s_id = "<unknown>";
const char *errstr;
@ -301,13 +301,13 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
va_start(args, fmt);
vaf.va = &args;
errstr = btrfs_decode_error(errno);
errstr = btrfs_decode_error(error);
if (fs_info && (btrfs_test_opt(fs_info, PANIC_ON_FATAL_ERROR)))
panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (errno=%d %s)\n",
s_id, function, line, &vaf, errno, errstr);
s_id, function, line, &vaf, error, errstr);
btrfs_crit(fs_info, "panic in %s:%d: %pV (errno=%d %s)",
function, line, &vaf, errno, errstr);
function, line, &vaf, error, errstr);
va_end(args);
/* Caller calls BUG() */
}


@ -184,25 +184,25 @@ do { \
__printf(5, 6)
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
unsigned int line, int error, const char *fmt, ...);
const char * __attribute_const__ btrfs_decode_error(int errno);
const char * __attribute_const__ btrfs_decode_error(int error);
#define btrfs_handle_fs_error(fs_info, errno, fmt, args...) \
#define btrfs_handle_fs_error(fs_info, error, fmt, args...) \
__btrfs_handle_fs_error((fs_info), __func__, __LINE__, \
(errno), fmt, ##args)
(error), fmt, ##args)
__printf(5, 6)
__cold
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
unsigned int line, int error, const char *fmt, ...);
/*
* If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
* will panic(). Otherwise we BUG() here.
*/
#define btrfs_panic(fs_info, errno, fmt, args...) \
#define btrfs_panic(fs_info, error, fmt, args...) \
do { \
__btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
__btrfs_panic(fs_info, __func__, __LINE__, error, fmt, ##args); \
BUG(); \
} while (0)


@ -124,25 +124,24 @@ static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
* look find the first ordered struct that has this offset, otherwise
* the first one less than this offset
*/
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
u64 file_offset)
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
u64 file_offset)
{
struct rb_root *root = &tree->tree;
struct rb_node *prev = NULL;
struct rb_node *ret;
struct btrfs_ordered_extent *entry;
if (tree->last) {
entry = rb_entry(tree->last, struct btrfs_ordered_extent,
if (inode->ordered_tree_last) {
entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
rb_node);
if (in_range(file_offset, entry->file_offset, entry->num_bytes))
return tree->last;
return inode->ordered_tree_last;
}
ret = __tree_search(root, file_offset, &prev);
ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
if (!ret)
ret = prev;
if (ret)
tree->last = ret;
inode->ordered_tree_last = ret;
return ret;
}
@ -191,6 +190,7 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
INIT_LIST_HEAD(&entry->log_list);
INIT_LIST_HEAD(&entry->root_extent_list);
INIT_LIST_HEAD(&entry->work_list);
INIT_LIST_HEAD(&entry->bioc_list);
init_completion(&entry->completion);
/*
@ -208,7 +208,6 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
struct btrfs_inode *inode = BTRFS_I(entry->inode);
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
@ -221,13 +220,14 @@ static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
/* One ref for the tree. */
refcount_inc(&entry->refs);
spin_lock_irq(&tree->lock);
node = tree_insert(&tree->tree, entry->file_offset, &entry->rb_node);
spin_lock_irq(&inode->ordered_tree_lock);
node = tree_insert(&inode->ordered_tree, entry->file_offset,
&entry->rb_node);
if (node)
btrfs_panic(fs_info, -EEXIST,
"inconsistency in ordered tree at offset %llu",
entry->file_offset);
spin_unlock_irq(&tree->lock);
spin_unlock_irq(&inode->ordered_tree_lock);
spin_lock(&root->ordered_extent_lock);
list_add_tail(&entry->root_extent_list,
@ -287,12 +287,11 @@ struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum)
{
struct btrfs_ordered_inode_tree *tree;
struct btrfs_inode *inode = BTRFS_I(entry->inode);
tree = &BTRFS_I(entry->inode)->ordered_tree;
spin_lock_irq(&tree->lock);
spin_lock_irq(&inode->ordered_tree_lock);
list_add_tail(&sum->list, &entry->list);
spin_unlock_irq(&tree->lock);
spin_unlock_irq(&inode->ordered_tree_lock);
}
static void finish_ordered_fn(struct btrfs_work *work)
@ -310,7 +309,7 @@ static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
struct btrfs_inode *inode = BTRFS_I(ordered->inode);
struct btrfs_fs_info *fs_info = inode->root->fs_info;
lockdep_assert_held(&inode->ordered_tree.lock);
lockdep_assert_held(&inode->ordered_tree_lock);
if (page) {
ASSERT(page->mapping);
@ -364,7 +363,7 @@ static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
fs_info->endio_freespace_worker : fs_info->endio_write_workers;
btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL);
btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
btrfs_queue_work(wq, &ordered->work);
}
@ -378,9 +377,9 @@ bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);
spin_lock_irqsave(&inode->ordered_tree.lock, flags);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);
ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
spin_unlock_irqrestore(&inode->ordered_tree.lock, flags);
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
if (ret)
btrfs_queue_ordered_fn(ordered);
@ -404,7 +403,6 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
struct page *page, u64 file_offset,
u64 num_bytes, bool uptodate)
{
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
unsigned long flags;
@ -414,13 +412,13 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
file_offset + num_bytes - 1,
uptodate);
spin_lock_irqsave(&tree->lock, flags);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);
while (cur < file_offset + num_bytes) {
u64 entry_end;
u64 end;
u32 len;
node = tree_search(tree, cur);
node = ordered_tree_search(inode, cur);
/* No ordered extents at all */
if (!node)
break;
@ -467,13 +465,13 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
len = end + 1 - cur;
if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
spin_unlock_irqrestore(&tree->lock, flags);
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
btrfs_queue_ordered_fn(entry);
spin_lock_irqsave(&tree->lock, flags);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);
}
cur += len;
}
spin_unlock_irqrestore(&tree->lock, flags);
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}
/*
@ -497,19 +495,18 @@ bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size)
{
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
unsigned long flags;
bool finished = false;
spin_lock_irqsave(&tree->lock, flags);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);
if (cached && *cached) {
entry = *cached;
goto have_entry;
}
node = tree_search(tree, file_offset);
node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@ -540,7 +537,7 @@ out:
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
}
spin_unlock_irqrestore(&tree->lock, flags);
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
return finished;
}
@ -578,7 +575,6 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
struct btrfs_ordered_extent *entry)
{
struct btrfs_ordered_inode_tree *tree;
struct btrfs_root *root = btrfs_inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
@ -609,16 +605,15 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
fs_info->delalloc_batch);
tree = &btrfs_inode->ordered_tree;
spin_lock_irq(&tree->lock);
spin_lock_irq(&btrfs_inode->ordered_tree_lock);
node = &entry->rb_node;
rb_erase(node, &tree->tree);
rb_erase(node, &btrfs_inode->ordered_tree);
RB_CLEAR_NODE(node);
if (tree->last == node)
tree->last = NULL;
if (btrfs_inode->ordered_tree_last == node)
btrfs_inode->ordered_tree_last = NULL;
set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
spin_unlock_irq(&tree->lock);
spin_unlock_irq(&btrfs_inode->ordered_tree_lock);
/*
* The current running transaction is waiting on us, we need to let it
@ -711,7 +706,7 @@ u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
spin_unlock(&root->ordered_extent_lock);
btrfs_init_work(&ordered->flush_work,
btrfs_run_ordered_extent_work, NULL, NULL);
btrfs_run_ordered_extent_work, NULL);
list_add_tail(&ordered->work_list, &works);
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
@ -875,14 +870,12 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
u64 file_offset)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
unsigned long flags;
tree = &inode->ordered_tree;
spin_lock_irqsave(&tree->lock, flags);
node = tree_search(tree, file_offset);
spin_lock_irqsave(&inode->ordered_tree_lock, flags);
node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@ -894,7 +887,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
trace_btrfs_ordered_extent_lookup(inode, entry);
}
out:
spin_unlock_irqrestore(&tree->lock, flags);
spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
return entry;
}
@ -904,15 +897,13 @@ out:
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
struct btrfs_inode *inode, u64 file_offset, u64 len)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
tree = &inode->ordered_tree;
spin_lock_irq(&tree->lock);
node = tree_search(tree, file_offset);
spin_lock_irq(&inode->ordered_tree_lock);
node = ordered_tree_search(inode, file_offset);
if (!node) {
node = tree_search(tree, file_offset + len);
node = ordered_tree_search(inode, file_offset + len);
if (!node)
goto out;
}
@ -936,7 +927,7 @@ out:
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_range(inode, entry);
}
spin_unlock_irq(&tree->lock);
spin_unlock_irq(&inode->ordered_tree_lock);
return entry;
}
@ -947,13 +938,12 @@ out:
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct list_head *list)
{
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *n;
ASSERT(inode_is_locked(&inode->vfs_inode));
spin_lock_irq(&tree->lock);
for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
spin_lock_irq(&inode->ordered_tree_lock);
for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
struct btrfs_ordered_extent *ordered;
ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
@ -966,7 +956,7 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
refcount_inc(&ordered->refs);
trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
}
spin_unlock_irq(&tree->lock);
spin_unlock_irq(&inode->ordered_tree_lock);
}
/*
@ -976,13 +966,11 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
tree = &inode->ordered_tree;
spin_lock_irq(&tree->lock);
node = tree_search(tree, file_offset);
spin_lock_irq(&inode->ordered_tree_lock);
node = ordered_tree_search(inode, file_offset);
if (!node)
goto out;
@ -990,7 +978,7 @@ btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
refcount_inc(&entry->refs);
trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
spin_unlock_irq(&tree->lock);
spin_unlock_irq(&inode->ordered_tree_lock);
return entry;
}
@ -1006,15 +994,14 @@ out:
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
struct btrfs_inode *inode, u64 file_offset, u64 len)
{
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct rb_node *node;
struct rb_node *cur;
struct rb_node *prev;
struct rb_node *next;
struct btrfs_ordered_extent *entry = NULL;
spin_lock_irq(&tree->lock);
node = tree->tree.rb_node;
spin_lock_irq(&inode->ordered_tree_lock);
node = inode->ordered_tree.rb_node;
/*
* Here we don't want to use tree_search() which will use tree->last
* and screw up the search order.
@ -1068,7 +1055,7 @@ out:
trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
}
spin_unlock_irq(&tree->lock);
spin_unlock_irq(&inode->ordered_tree_lock);
return entry;
}
@ -1147,7 +1134,6 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
struct btrfs_ordered_extent *ordered, u64 len)
{
struct btrfs_inode *inode = BTRFS_I(ordered->inode);
struct btrfs_ordered_inode_tree *tree = &inode->ordered_tree;
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
u64 file_offset = ordered->file_offset;
@ -1187,13 +1173,13 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
refcount_inc(&new->refs);
spin_lock_irq(&root->ordered_extent_lock);
spin_lock(&tree->lock);
spin_lock(&inode->ordered_tree_lock);
/* Remove from tree once */
node = &ordered->rb_node;
rb_erase(node, &tree->tree);
rb_erase(node, &inode->ordered_tree);
RB_CLEAR_NODE(node);
if (tree->last == node)
tree->last = NULL;
if (inode->ordered_tree_last == node)
inode->ordered_tree_last = NULL;
ordered->file_offset += len;
ordered->disk_bytenr += len;
@ -1224,18 +1210,19 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
}
/* Re-insert the node */
node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
node = tree_insert(&inode->ordered_tree, ordered->file_offset,
&ordered->rb_node);
if (node)
btrfs_panic(fs_info, -EEXIST,
"zoned: inconsistency in ordered tree at offset %llu",
ordered->file_offset);
node = tree_insert(&tree->tree, new->file_offset, &new->rb_node);
node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
if (node)
btrfs_panic(fs_info, -EEXIST,
"zoned: inconsistency in ordered tree at offset %llu",
new->file_offset);
spin_unlock(&tree->lock);
spin_unlock(&inode->ordered_tree_lock);
list_add_tail(&new->root_extent_list, &root->ordered_extents);
root->nr_ordered_extents++;


@ -6,13 +6,6 @@
#ifndef BTRFS_ORDERED_DATA_H
#define BTRFS_ORDERED_DATA_H
/* one of these per inode */
struct btrfs_ordered_inode_tree {
spinlock_t lock;
struct rb_root tree;
struct rb_node *last;
};
struct btrfs_ordered_sum {
/*
* Logical start address and length for of the blocks covered by
@ -151,15 +144,9 @@ struct btrfs_ordered_extent {
struct completion completion;
struct btrfs_work flush_work;
struct list_head work_list;
};
static inline void
btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
{
spin_lock_init(&t->lock);
t->tree = RB_ROOT;
t->last = NULL;
}
struct list_head bioc_list;
};
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent);
int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);


@ -9,6 +9,8 @@
#include "print-tree.h"
#include "accessors.h"
#include "tree-checker.h"
#include "volumes.h"
#include "raid-stripe-tree.h"
struct root_name_map {
u64 id;
@ -28,6 +30,7 @@ static const struct root_name_map root_map[] = {
{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" },
{ BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
{ BTRFS_RAID_STRIPE_TREE_OBJECTID, "RAID_STRIPE_TREE" },
};
const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
@ -80,12 +83,20 @@ static void print_extent_data_ref(const struct extent_buffer *eb,
btrfs_extent_data_ref_count(eb, ref));
}
static void print_extent_owner_ref(const struct extent_buffer *eb,
const struct btrfs_extent_owner_ref *ref)
{
ASSERT(btrfs_fs_incompat(eb->fs_info, SIMPLE_QUOTA));
pr_cont("extent data owner root %llu\n", btrfs_extent_owner_ref_root_id(eb, ref));
}
static void print_extent_item(const struct extent_buffer *eb, int slot, int type)
{
struct btrfs_extent_item *ei;
struct btrfs_extent_inline_ref *iref;
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_extent_owner_ref *oref;
struct btrfs_disk_key key;
unsigned long end;
unsigned long ptr;
@ -161,6 +172,10 @@ static void print_extent_item(const struct extent_buffer *eb, int slot, int type
"\t\t\t(parent %llu not aligned to sectorsize %u)\n",
offset, eb->fs_info->sectorsize);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
oref = (struct btrfs_extent_owner_ref *)(&iref->offset);
print_extent_owner_ref(eb, oref);
break;
default:
pr_cont("(extent %llu has INVALID ref type %d)\n",
eb->start, type);
@ -189,6 +204,22 @@ static void print_uuid_item(const struct extent_buffer *l, unsigned long offset,
}
}
static void print_raid_stripe_key(const struct extent_buffer *eb, u32 item_size,
struct btrfs_stripe_extent *stripe)
{
const int num_stripes = btrfs_num_raid_stripes(item_size);
const u8 encoding = btrfs_stripe_extent_encoding(eb, stripe);
pr_info("\t\t\tencoding: %s\n",
(encoding && encoding < BTRFS_NR_RAID_TYPES) ?
btrfs_raid_array[encoding].raid_name : "unknown");
for (int i = 0; i < num_stripes; i++)
pr_info("\t\t\tstride %d devid %llu physical %llu\n",
i, btrfs_raid_stride_devid(eb, &stripe->strides[i]),
btrfs_raid_stride_physical(eb, &stripe->strides[i]));
}
/*
* Helper to output refs and locking status of extent buffer. Useful to debug
* race condition related problems.
@ -349,6 +380,10 @@ void btrfs_print_leaf(const struct extent_buffer *l)
print_uuid_item(l, btrfs_item_ptr_offset(l, i),
btrfs_item_size(l, i));
break;
case BTRFS_RAID_STRIPE_KEY:
print_raid_stripe_key(l, btrfs_item_size(l, i),
btrfs_item_ptr(l, i, struct btrfs_stripe_extent));
break;
}
}
}


@ -15,6 +15,7 @@
#include "fs.h"
#include "accessors.h"
#include "super.h"
#include "dir-item.h"
#define BTRFS_PROP_HANDLERS_HT_BITS 8
static DEFINE_HASHTABLE(prop_handlers_ht, BTRFS_PROP_HANDLERS_HT_BITS);

[diff suppressed: file too large]

@ -101,8 +101,15 @@
* subtree rescan for them.
*/
#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN (1UL << 3)
#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING (1UL << 4)
/*
* These flags share the flags field of the btrfs_qgroup_status_item with the
* persisted flags defined in btrfs_tree.h.
*
* To minimize the chance of collision with new persisted status flags, these
* count backwards from the MSB.
*/
#define BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN (1ULL << 63)
#define BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING (1ULL << 62)
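/*
 * Illustration (not part of this patch): because the runtime bits now
 * share a u64 with the persisted status flags, a compile-time guard
 * could catch a future collision. This assumes the persisted bits from
 * btrfs_tree.h are collected in BTRFS_QGROUP_STATUS_FLAGS_MASK and that
 * static_assert() (linux/build_bug.h) is available here.
 */
static_assert((BTRFS_QGROUP_STATUS_FLAGS_MASK &
	       (BTRFS_QGROUP_RUNTIME_FLAG_CANCEL_RESCAN |
		BTRFS_QGROUP_RUNTIME_FLAG_NO_ACCOUNTING)) == 0,
	      "qgroup runtime flags must not overlap persisted flags");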
/*
* Record a dirty extent, and info qgroup to update quota on it
@ -220,6 +227,33 @@ struct btrfs_qgroup {
struct list_head groups; /* groups this group is member of */
struct list_head members; /* groups that are members of this group */
struct list_head dirty; /* dirty groups */
/*
* For qgroup iteration usage.
*
* The iteration list should always be empty until qgroup_iterator_add()
* is called. And should be reset to empty after the iteration is
* finished.
*/
struct list_head iterator;
/*
* For nested iterator usage.
*
* Here we support at most one level of nested iterator calls like:
*
* LIST_HEAD(all_qgroups);
* {
* LIST_HEAD(local_qgroups);
* qgroup_iterator_add(local_qgroups, qg);
* qgroup_iterator_nested_add(all_qgroups, qg);
* do_some_work(local_qgroups);
* qgroup_iterator_clean(local_qgroups);
* }
* do_some_work(all_qgroups);
* qgroup_iterator_nested_clean(all_qgroups);
*/
struct list_head nested_iterator;
struct rb_node node; /* tree of qgroups */
/*
@ -235,6 +269,21 @@ struct btrfs_qgroup {
struct kobject kobj;
};
struct btrfs_squota_delta {
/* The fstree root this delta counts against. */
u64 root;
/* The number of bytes in the extent being counted. */
u64 num_bytes;
/* The number of bytes reserved for this extent. */
u64 rsv_bytes;
/* The generation the extent was created in. */
u64 generation;
/* Whether we are using or freeing the extent. */
bool is_inc;
/* Whether the extent is data or metadata. */
bool is_data;
};
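/*
 * Sketch (hypothetical caller, not from this patch): counting a freshly
 * allocated data extent under simple quotas. The function name, field
 * values and variable names are illustrative only; the struct layout and
 * btrfs_record_squota_delta() are taken from the declarations in this
 * header.
 */
static int example_count_new_extent(struct btrfs_fs_info *fs_info,
				    u64 root_id, u64 num_bytes, u64 gen)
{
	struct btrfs_squota_delta delta = {
		.root = root_id,	/* fstree that owns the extent */
		.num_bytes = num_bytes,
		.generation = gen,
		.is_inc = true,		/* using, not freeing, the extent */
		.is_data = true,
	};

	return btrfs_record_squota_delta(fs_info, &delta);
}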
static inline u64 btrfs_qgroup_subvolid(u64 qgroupid)
{
return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
@ -249,14 +298,23 @@ enum {
ENUM_BIT(QGROUP_FREE),
};
int btrfs_quota_enable(struct btrfs_fs_info *fs_info);
enum btrfs_qgroup_mode {
BTRFS_QGROUP_MODE_DISABLED,
BTRFS_QGROUP_MODE_FULL,
BTRFS_QGROUP_MODE_SIMPLE
};
enum btrfs_qgroup_mode btrfs_qgroup_mode(struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_enabled(struct btrfs_fs_info *fs_info);
bool btrfs_qgroup_full_accounting(struct btrfs_fs_info *fs_info);
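/*
 * Sketch (assumed call site, not from this patch): the mode helpers let
 * the expensive full-accounting paths bail out early when quotas are
 * disabled or running in simple mode. The function name is hypothetical.
 */
static inline int example_maybe_trace(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_qgroup_full_accounting(fs_info))
		return 0;	/* squota or disabled: skip backref walks */
	/* ... full qgroup extent tracing would go here ... */
	return 0;
}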
int btrfs_quota_enable(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_quota_ctl_args *quota_ctl_args);
int btrfs_quota_disable(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
bool interruptible);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
u64 dst);
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst);
int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
u64 dst);
int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid);
@ -267,80 +325,16 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;
/*
* Inform qgroup to trace one dirty extent, its info is recorded in @record.
* So qgroup can account it at transaction committing time.
*
* No lock version, caller must acquire delayed ref lock and allocated memory,
* then call btrfs_qgroup_trace_extent_post() after exiting lock context.
*
* Return 0 for success insert
* Return >0 for existing record, caller can free @record safely.
* Error is not possible
*/
int btrfs_qgroup_trace_extent_nolock(
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record);
/*
* Post handler after qgroup_trace_extent_nolock().
*
* NOTE: Current qgroup does the expensive backref walk at transaction
* committing time with TRANS_STATE_COMMIT_DOING, this blocks incoming
* new transaction.
* This is designed to allow btrfs_find_all_roots() to get correct new_roots
* result.
*
* However for old_roots there is no need to do backref walk at that time,
* since we search commit roots to walk backref and result will always be
* correct.
*
* Due to the nature of no lock version, we can't do backref there.
* So we must call btrfs_qgroup_trace_extent_post() after exiting
* spinlock context.
*
* TODO: If we can fix and prove btrfs_find_all_roots() can get correct result
* using current root, then we can move all expensive backref walk out of
* transaction committing, but not now as qgroup accounting will be wrong again.
*/
int btrfs_qgroup_trace_extent_post(struct btrfs_trans_handle *trans,
struct btrfs_qgroup_extent_record *qrecord);
/*
* Inform qgroup to trace one dirty extent, specified by @bytenr and
* @num_bytes.
* So qgroup can account it at commit trans time.
*
* Better encapsulated version, with memory allocation and backref walk for
* commit roots.
* So this can sleep.
*
* Return 0 if the operation is done.
* Return <0 for error, like memory allocation failure or invalid parameter
* (NULL trans)
*/
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
u64 num_bytes);
/*
* Inform qgroup to trace all leaf items of data
*
* Return 0 for success
* Return <0 for error(ENOMEM)
*/
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct extent_buffer *eb);
/*
* Inform qgroup to trace a whole subtree, including all its child tree
* blocks and data.
* The root tree block is specified by @root_eb.
*
* Normally used by relocation(tree block swap) and subvolume deletion.
*
* Return 0 for success
* Return <0 for error(ENOMEM or tree search error)
*/
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
@ -350,7 +344,8 @@ int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans);
int btrfs_run_qgroups(struct btrfs_trans_handle *trans);
int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
u64 objectid, struct btrfs_qgroup_inherit *inherit);
u64 objectid, u64 inode_rootid,
struct btrfs_qgroup_inherit *inherit);
void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes,
enum btrfs_qgroup_rsv_type type);
@ -408,20 +403,8 @@ static inline void btrfs_qgroup_free_meta_prealloc(struct btrfs_root *root,
BTRFS_QGROUP_RSV_META_PREALLOC);
}
/*
* Per-transaction meta reservation should be all freed at transaction commit
* time
*/
void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
/*
* Convert @num_bytes of META_PREALLOCATED reservation to META_PERTRANS.
*
* This is called when preallocated meta reservation needs to be used.
* Normally after btrfs_join_transaction() call.
*/
void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);
/* btrfs_qgroup_swapped_blocks related functions */
@ -439,5 +422,7 @@ int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *eb);
void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
int btrfs_record_squota_delta(struct btrfs_fs_info *fs_info,
struct btrfs_squota_delta *delta);
#endif

fs/btrfs/raid-stripe-tree.c (new file, 274 lines)

@ -0,0 +1,274 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2023 Western Digital Corporation or its affiliates.
*/
#include <linux/btrfs_tree.h>
#include "ctree.h"
#include "fs.h"
#include "accessors.h"
#include "transaction.h"
#include "disk-io.h"
#include "raid-stripe-tree.h"
#include "volumes.h"
#include "misc.h"
#include "print-tree.h"
int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *stripe_root = fs_info->stripe_root;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *leaf;
u64 found_start;
u64 found_end;
u64 end = start + length;
int slot;
int ret;
if (!stripe_root)
return 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
while (1) {
key.objectid = start;
key.type = BTRFS_RAID_STRIPE_KEY;
key.offset = length;
ret = btrfs_search_slot(trans, stripe_root, &key, path, -1, 1);
if (ret < 0)
break;
if (ret > 0) {
ret = 0;
if (path->slots[0] == 0)
break;
path->slots[0]--;
}
leaf = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &key, slot);
found_start = key.objectid;
found_end = found_start + key.offset;
/* That stripe ends before we start, we're done. */
if (found_end <= start)
break;
trace_btrfs_raid_extent_delete(fs_info, start, end,
found_start, found_end);
ASSERT(found_start >= start && found_end <= end);
ret = btrfs_del_item(trans, stripe_root, path);
if (ret)
break;
btrfs_release_path(path);
}
btrfs_free_path(path);
return ret;
}
static int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans,
struct btrfs_io_context *bioc)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_key stripe_key;
struct btrfs_root *stripe_root = fs_info->stripe_root;
const int num_stripes = btrfs_bg_type_to_factor(bioc->map_type);
u8 encoding = btrfs_bg_flags_to_raid_index(bioc->map_type);
struct btrfs_stripe_extent *stripe_extent;
const size_t item_size = struct_size(stripe_extent, strides, num_stripes);
int ret;
stripe_extent = kzalloc(item_size, GFP_NOFS);
if (!stripe_extent) {
btrfs_abort_transaction(trans, -ENOMEM);
btrfs_end_transaction(trans);
return -ENOMEM;
}
trace_btrfs_insert_one_raid_extent(fs_info, bioc->logical, bioc->size,
num_stripes);
btrfs_set_stack_stripe_extent_encoding(stripe_extent, encoding);
for (int i = 0; i < num_stripes; i++) {
u64 devid = bioc->stripes[i].dev->devid;
u64 physical = bioc->stripes[i].physical;
u64 length = bioc->stripes[i].length;
struct btrfs_raid_stride *raid_stride = &stripe_extent->strides[i];
if (length == 0)
length = bioc->size;
btrfs_set_stack_raid_stride_devid(raid_stride, devid);
btrfs_set_stack_raid_stride_physical(raid_stride, physical);
}
stripe_key.objectid = bioc->logical;
stripe_key.type = BTRFS_RAID_STRIPE_KEY;
stripe_key.offset = bioc->size;
ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent,
item_size);
if (ret)
btrfs_abort_transaction(trans, ret);
kfree(stripe_extent);
return ret;
}
int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
struct btrfs_ordered_extent *ordered_extent)
{
struct btrfs_io_context *bioc;
int ret;
if (!btrfs_fs_incompat(trans->fs_info, RAID_STRIPE_TREE))
return 0;
list_for_each_entry(bioc, &ordered_extent->bioc_list, rst_ordered_entry) {
ret = btrfs_insert_one_raid_extent(trans, bioc);
if (ret)
return ret;
}
while (!list_empty(&ordered_extent->bioc_list)) {
bioc = list_first_entry(&ordered_extent->bioc_list,
typeof(*bioc), rst_ordered_entry);
list_del(&bioc->rst_ordered_entry);
btrfs_put_bioc(bioc);
}
return ret;
}
int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
u64 logical, u64 *length, u64 map_type,
u32 stripe_index, struct btrfs_io_stripe *stripe)
{
struct btrfs_root *stripe_root = fs_info->stripe_root;
struct btrfs_stripe_extent *stripe_extent;
struct btrfs_key stripe_key;
struct btrfs_key found_key;
struct btrfs_path *path;
struct extent_buffer *leaf;
const u64 end = logical + *length;
int num_stripes;
u8 encoding;
u64 offset;
u64 found_logical;
u64 found_length;
u64 found_end;
int slot;
int ret;
stripe_key.objectid = logical;
stripe_key.type = BTRFS_RAID_STRIPE_KEY;
stripe_key.offset = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
if (stripe->is_scrub) {
path->skip_locking = 1;
path->search_commit_root = 1;
}
ret = btrfs_search_slot(NULL, stripe_root, &stripe_key, path, 0, 0);
if (ret < 0)
goto free_path;
if (ret) {
if (path->slots[0] != 0)
path->slots[0]--;
}
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
btrfs_item_key_to_cpu(leaf, &found_key, slot);
found_logical = found_key.objectid;
found_length = found_key.offset;
found_end = found_logical + found_length;
if (found_logical > end) {
ret = -ENOENT;
goto out;
}
if (in_range(logical, found_logical, found_length))
break;
ret = btrfs_next_item(stripe_root, path);
if (ret)
goto out;
}
offset = logical - found_logical;
/*
* If we have a logically contiguous, but physically non-continuous
* range, we need to split the bio. Record the length after which we
* must split the bio.
*/
if (end > found_end)
*length -= end - found_end;
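/*
 * Worked example with made-up numbers: logical = 64K and *length = 64K
 * give end = 128K; if the found stripe covers [32K, 96K), then
 * found_end = 96K and *length is trimmed by 128K - 96K = 32K, leaving
 * 32K mapped by this stripe. The caller then resubmits the remaining
 * 32K as a new bio.
 */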
num_stripes = btrfs_num_raid_stripes(btrfs_item_size(leaf, slot));
stripe_extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
encoding = btrfs_stripe_extent_encoding(leaf, stripe_extent);
if (encoding != btrfs_bg_flags_to_raid_index(map_type)) {
ret = -EUCLEAN;
btrfs_handle_fs_error(fs_info, ret,
"on-disk stripe encoding %d doesn't match RAID index %d",
encoding,
btrfs_bg_flags_to_raid_index(map_type));
goto out;
}
for (int i = 0; i < num_stripes; i++) {
struct btrfs_raid_stride *stride = &stripe_extent->strides[i];
u64 devid = btrfs_raid_stride_devid(leaf, stride);
u64 physical = btrfs_raid_stride_physical(leaf, stride);
if (devid != stripe->dev->devid)
continue;
if ((map_type & BTRFS_BLOCK_GROUP_DUP) && stripe_index != i)
continue;
stripe->physical = physical + offset;
trace_btrfs_get_raid_extent_offset(fs_info, logical, *length,
stripe->physical, devid);
ret = 0;
goto free_path;
}
/* If we're here, we haven't found the requested devid in the stripe. */
ret = -ENOENT;
out:
if (ret > 0)
ret = -ENOENT;
if (ret && ret != -EIO && !stripe->is_scrub) {
if (IS_ENABLED(CONFIG_BTRFS_DEBUG))
btrfs_print_tree(leaf, 1);
btrfs_err(fs_info,
"cannot find raid-stripe for logical [%llu, %llu] devid %llu, profile %s",
logical, logical + *length, stripe->dev->devid,
btrfs_bg_type_to_raid_name(map_type));
}
free_path:
btrfs_free_path(path);
return ret;
}


@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2023 Western Digital Corporation or its affiliates.
*/
#ifndef BTRFS_RAID_STRIPE_TREE_H
#define BTRFS_RAID_STRIPE_TREE_H
#define BTRFS_RST_SUPP_BLOCK_GROUP_MASK (BTRFS_BLOCK_GROUP_DUP | \
BTRFS_BLOCK_GROUP_RAID1_MASK | \
BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID10)
struct btrfs_io_context;
struct btrfs_io_stripe;
struct btrfs_ordered_extent;
struct btrfs_trans_handle;
int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length);
int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info,
u64 logical, u64 *length, u64 map_type,
u32 stripe_index, struct btrfs_io_stripe *stripe);
int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans,
struct btrfs_ordered_extent *ordered_extent);
static inline bool btrfs_need_stripe_tree_update(struct btrfs_fs_info *fs_info,
u64 map_type)
{
u64 type = map_type & BTRFS_BLOCK_GROUP_TYPE_MASK;
u64 profile = map_type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE))
return false;
if (type != BTRFS_BLOCK_GROUP_DATA)
return false;
if (profile & BTRFS_RST_SUPP_BLOCK_GROUP_MASK)
return true;
return false;
}
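/*
 * Sketch (assumed caller, pieced together from fields visible in this
 * series: bioc->map_type, bioc->rst_ordered_entry, ordered->bioc_list):
 * when a data write completes, queue the io_context on the ordered
 * extent so btrfs_insert_raid_extent() can persist the physical
 * placement at transaction commit. Reference counting is omitted here.
 */
static inline void example_queue_rst_update(struct btrfs_fs_info *fs_info,
					    struct btrfs_ordered_extent *ordered,
					    struct btrfs_io_context *bioc)
{
	if (btrfs_need_stripe_tree_update(fs_info, bioc->map_type))
		list_add_tail(&bioc->rst_ordered_entry, &ordered->bioc_list);
}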
static inline int btrfs_num_raid_stripes(u32 item_size)
{
return (item_size - offsetof(struct btrfs_stripe_extent, strides)) /
sizeof(struct btrfs_raid_stride);
}
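/*
 * Example arithmetic (assuming an 8-byte btrfs_stripe_extent header
 * before the strides array and 16 bytes per btrfs_raid_stride, i.e.
 * devid + physical): a 40-byte item yields (40 - 8) / 16 = 2 stripes,
 * as expected for a RAID1 extent mirrored on two devices.
 */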
#endif


@ -485,6 +485,9 @@ static int process_extent_item(struct btrfs_fs_info *fs_info,
ret = add_shared_data_ref(fs_info, offset, count,
key->objectid, key->offset);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
break;
default:
btrfs_err(fs_info, "invalid key type in iref");
ret = -EINVAL;
@ -652,7 +655,7 @@ static void dump_block_entry(struct btrfs_fs_info *fs_info,
}
/*
* btrfs_ref_tree_mod: called when we modify a ref for a bytenr
* Called when we modify a ref for a bytenr.
*
* This will add an action item to the given bytenr and do sanity checks to make
* sure we haven't messed something up. If we are making a new allocation and
@ -681,10 +684,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
if (generic_ref->type == BTRFS_REF_METADATA) {
if (!parent)
ref_root = generic_ref->tree_ref.owning_root;
ref_root = generic_ref->tree_ref.ref_root;
owner = generic_ref->tree_ref.level;
} else if (!parent) {
ref_root = generic_ref->data_ref.owning_root;
ref_root = generic_ref->data_ref.ref_root;
owner = generic_ref->data_ref.ino;
offset = generic_ref->data_ref.offset;
}


@ -25,7 +25,6 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
const u64 olen,
int no_time_update)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
inode_inc_iversion(inode);
@ -43,7 +42,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
}
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);


@ -111,8 +111,8 @@ struct tree_block {
}; /* Use rb_simple_node for search/insert */
u64 owner;
struct btrfs_key key;
unsigned int level:8;
unsigned int key_ready:1;
u8 level;
bool key_ready;
};
#define MAX_EXTENTS 128
@ -122,6 +122,13 @@ struct file_extent_cluster {
u64 end;
u64 boundary[MAX_EXTENTS];
unsigned int nr;
u64 owning_root;
};
/* Stages of data relocation. */
enum reloc_stage {
MOVE_DATA_EXTENTS,
UPDATE_DATA_PTRS
};
struct reloc_control {
@ -155,16 +162,12 @@ struct reloc_control {
u64 search_start;
u64 extents_found;
unsigned int stage:8;
unsigned int create_reloc_tree:1;
unsigned int merge_reloc_tree:1;
unsigned int found_file_extent:1;
enum reloc_stage stage;
bool create_reloc_tree;
bool merge_reloc_tree;
bool found_file_extent;
};
/* stages of data relocation */
#define MOVE_DATA_EXTENTS 0
#define UPDATE_DATA_PTRS 1
static void mark_block_processed(struct reloc_control *rc,
struct btrfs_backref_node *node)
{
@ -180,13 +183,6 @@ static void mark_block_processed(struct reloc_control *rc,
node->processed = 1;
}
static void mapping_tree_init(struct mapping_tree *tree)
{
tree->rb_root = RB_ROOT;
spin_lock_init(&tree->lock);
}
/*
* walk up backref nodes until reach node presents tree root
*/
@ -299,7 +295,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
return 1;
}
static bool reloc_root_is_dead(struct btrfs_root *root)
static bool reloc_root_is_dead(const struct btrfs_root *root)
{
/*
* Pair with set_bit/clear_bit in clean_dirty_subvols and
@ -320,7 +316,7 @@ static bool reloc_root_is_dead(struct btrfs_root *root)
* from no reloc root. But btrfs_should_ignore_reloc_root() below is a
* special case.
*/
static bool have_reloc_root(struct btrfs_root *root)
static bool have_reloc_root(const struct btrfs_root *root)
{
if (reloc_root_is_dead(root))
return false;
@ -329,31 +325,30 @@ static bool have_reloc_root(struct btrfs_root *root)
return true;
}
int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root)
{
struct btrfs_root *reloc_root;
if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return 0;
return false;
/* This root has been merged with its reloc tree, we can ignore it */
if (reloc_root_is_dead(root))
return 1;
return true;
reloc_root = root->reloc_root;
if (!reloc_root)
return 0;
return false;
if (btrfs_header_generation(reloc_root->commit_root) ==
root->fs_info->running_transaction->transid)
return 0;
return false;
/*
* if there is reloc tree and it was created in previous
* transaction backref lookup can find the reloc tree,
* so backref node for the fs tree root is useless for
* relocation.
* If there is a reloc tree and it was created in a previous transaction,
* backref lookup can find the reloc tree, so the backref node for the fs
* tree root is useless for relocation.
*/
return 1;
return true;
}
/*
@ -547,7 +542,7 @@ out:
*/
static int clone_backref_node(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
struct btrfs_root *src,
const struct btrfs_root *src,
struct btrfs_root *dest)
{
struct btrfs_root *reloc_root = src->reloc_root;
@ -632,7 +627,7 @@ fail:
/*
* helper to add 'address of tree root -> reloc tree' mapping
*/
static int __must_check __add_reloc_root(struct btrfs_root *root)
static int __add_reloc_root(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *rb_node;
@ -1159,7 +1154,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
key.offset -= btrfs_file_extent_offset(leaf, fi);
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
num_bytes, parent);
num_bytes, parent, root->root_key.objectid);
btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
key.objectid, key.offset,
root->root_key.objectid, false);
@ -1170,7 +1165,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
}
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
num_bytes, parent);
num_bytes, parent, root->root_key.objectid);
btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
key.objectid, key.offset,
root->root_key.objectid, false);
@ -1181,15 +1176,15 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
}
}
if (dirty)
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (inode)
btrfs_add_delayed_iput(BTRFS_I(inode));
return ret;
}
static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
struct btrfs_path *path, int level)
static noinline_for_stack int memcmp_node_keys(const struct extent_buffer *eb,
int slot, const struct btrfs_path *path,
int level)
{
struct btrfs_disk_key key1;
struct btrfs_disk_key key2;
@ -1374,16 +1369,17 @@ again:
*/
btrfs_set_node_blockptr(parent, slot, new_bytenr);
btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
btrfs_mark_buffer_dirty(parent);
btrfs_mark_buffer_dirty(trans, parent);
btrfs_set_node_blockptr(path->nodes[level],
path->slots[level], old_bytenr);
btrfs_set_node_ptr_generation(path->nodes[level],
path->slots[level], old_ptr_gen);
btrfs_mark_buffer_dirty(path->nodes[level]);
btrfs_mark_buffer_dirty(trans, path->nodes[level]);
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
blocksize, path->nodes[level]->start);
blocksize, path->nodes[level]->start,
src->root_key.objectid);
btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
0, true);
ret = btrfs_inc_extent_ref(trans, &ref);
@ -1392,7 +1388,7 @@ again:
break;
}
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
blocksize, 0);
blocksize, 0, dest->root_key.objectid);
btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
true);
ret = btrfs_inc_extent_ref(trans, &ref);
@ -1401,8 +1397,9 @@ again:
break;
}
/* We don't know the real owning_root, use 0. */
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
blocksize, path->nodes[level]->start);
blocksize, path->nodes[level]->start, 0);
btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
0, true);
ret = btrfs_free_extent(trans, &ref);
@ -1411,8 +1408,9 @@ again:
break;
}
/* We don't know the real owning_root, use 0. */
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
blocksize, 0);
blocksize, 0, 0);
btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
0, true);
ret = btrfs_free_extent(trans, &ref);
@ -1518,8 +1516,8 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
* [min_key, max_key)
*/
static int invalidate_extent_cache(struct btrfs_root *root,
struct btrfs_key *min_key,
struct btrfs_key *max_key)
const struct btrfs_key *min_key,
const struct btrfs_key *max_key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode = NULL;
@ -1897,7 +1895,7 @@ again:
}
}
rc->merge_reloc_tree = 1;
rc->merge_reloc_tree = true;
while (!list_empty(&rc->reloc_roots)) {
reloc_root = list_entry(rc->reloc_roots.next,
@ -2517,11 +2515,12 @@ static int do_relocation(struct btrfs_trans_handle *trans,
node->eb->start);
btrfs_set_node_ptr_generation(upper->eb, slot,
trans->transid);
btrfs_mark_buffer_dirty(upper->eb);
btrfs_mark_buffer_dirty(trans, upper->eb);
btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
node->eb->start, blocksize,
upper->eb->start);
upper->eb->start,
btrfs_header_owner(upper->eb));
btrfs_init_tree_ref(&ref, node->level,
btrfs_header_owner(upper->eb),
root->root_key.objectid, false);
@ -2633,7 +2632,7 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
u32 blocksize = rc->extent_root->fs_info->nodesize;
if (test_range_bit(&rc->processed_blocks, bytenr,
bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
bytenr + blocksize - 1, EXTENT_DIRTY, NULL))
return 1;
return 0;
}
@ -2660,7 +2659,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
else
btrfs_node_key_to_cpu(eb, &block->key, 0);
free_extent_buffer(eb);
block->key_ready = 1;
block->key_ready = true;
return 0;
}
@ -2830,7 +2829,7 @@ out_free_blocks:
static noinline_for_stack int prealloc_file_extent_cluster(
struct btrfs_inode *inode,
struct file_extent_cluster *cluster)
const struct file_extent_cluster *cluster)
{
u64 alloc_hint = 0;
u64 start;
@ -2965,7 +2964,7 @@ static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inod
/*
* Allow error injection to test balance/relocation cancellation
*/
noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
noinline int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info)
{
return atomic_read(&fs_info->balance_cancel_req) ||
atomic_read(&fs_info->reloc_cancel_req) ||
@ -2973,7 +2972,7 @@ noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
}
ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
static u64 get_cluster_boundary_end(const struct file_extent_cluster *cluster,
int cluster_nr)
{
/* Last extent, use cluster end directly */
@ -2985,7 +2984,7 @@ static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
}
static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
struct file_extent_cluster *cluster,
const struct file_extent_cluster *cluster,
int *cluster_nr, unsigned long page_index)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@ -3120,7 +3119,7 @@ release_page:
}
static int relocate_file_extent_cluster(struct inode *inode,
struct file_extent_cluster *cluster)
const struct file_extent_cluster *cluster)
{
u64 offset = BTRFS_I(inode)->index_cnt;
unsigned long index;
@ -3158,11 +3157,12 @@ out:
return ret;
}
static noinline_for_stack
int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
struct file_extent_cluster *cluster)
static noinline_for_stack int relocate_data_extent(struct inode *inode,
const struct btrfs_key *extent_key,
struct file_extent_cluster *cluster)
{
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
ret = relocate_file_extent_cluster(inode, cluster);
@ -3171,8 +3171,38 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
cluster->nr = 0;
}
if (!cluster->nr)
/*
* Under simple quotas, we set root->relocation_src_root when we find
* the extent. If adjacent extents have different owners, we can't merge
* them while relocating. Handle this by storing the owning root that
* started a cluster, and break cluster formation if we see an extent from
* a different root (just like the above case of non-adjacent extents).
*
* Without simple quotas, relocation_src_root is always 0, so we should
* never see a mismatch, and it should have no effect on relocation
* clusters.
*/
if (cluster->nr > 0 && cluster->owning_root != root->relocation_src_root) {
u64 tmp = root->relocation_src_root;
/*
* root->relocation_src_root is the state that actually affects
* the preallocation we do here, so set it to the root owning
* the cluster we need to relocate.
*/
root->relocation_src_root = cluster->owning_root;
ret = relocate_file_extent_cluster(inode, cluster);
if (ret)
return ret;
cluster->nr = 0;
/* And reset it back for the current extent's owning root. */
root->relocation_src_root = tmp;
}
if (!cluster->nr) {
cluster->start = extent_key->objectid;
cluster->owning_root = root->relocation_src_root;
} else
BUG_ON(cluster->nr >= MAX_EXTENTS);
cluster->end = extent_key->objectid + extent_key->offset - 1;
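To make the cluster-break rule above concrete, here is a hedged userspace model of just this logic. The types and the flush step are simplified, illustrative stand-ins for the kernel structures (boundary tracking, MAX_EXTENTS and the preallocation are omitted): an extent extends the current cluster only when it is both adjacent and owned by the same root, otherwise the pending cluster is flushed first.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct extent { uint64_t start, len, owner; };
struct cluster { uint64_t start, end, owning_root; unsigned int nr; };

static void flush_cluster(struct cluster *c)
{
	if (c->nr)
		printf("relocate cluster [%llu, %llu] owned by root %llu\n",
		       (unsigned long long)c->start,
		       (unsigned long long)c->end,
		       (unsigned long long)c->owning_root);
	c->nr = 0;
}

static void add_extent(struct cluster *c, const struct extent *e)
{
	/* A non-adjacent extent or a different owning root ends the cluster. */
	if (c->nr && (e->start != c->end + 1 || e->owner != c->owning_root))
		flush_cluster(c);
	if (!c->nr) {
		c->start = e->start;
		c->owning_root = e->owner;
	}
	c->end = e->start + e->len - 1;
	c->nr++;
}

int main(void)
{
	/* Two adjacent extents from root 256, then one from root 257. */
	const struct extent ext[] = {
		{ 0, 16, 256 }, { 16, 16, 256 }, { 32, 16, 257 },
	};
	struct cluster c = { 0 };

	for (unsigned int i = 0; i < 3; i++)
		add_extent(&c, &ext[i]);
	flush_cluster(&c);
	return 0;
}

With the sample extents, the first cluster [0, 31] is flushed for root 256 before a new cluster starts for root 257, even though all three extents are physically adjacent.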
@ -3193,7 +3223,7 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
* the major work is getting the generation and level of the block
*/
static int add_tree_block(struct reloc_control *rc,
struct btrfs_key *extent_key,
const struct btrfs_key *extent_key,
struct btrfs_path *path,
struct rb_root *blocks)
{
@ -3278,7 +3308,7 @@ static int add_tree_block(struct reloc_control *rc,
block->key.objectid = rc->extent_root->fs_info->nodesize;
block->key.offset = generation;
block->level = level;
block->key_ready = 0;
block->key_ready = false;
block->owner = owner;
rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
@ -3444,11 +3474,10 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
/*
* helper to find all tree blocks that reference a given data extent
*/
static noinline_for_stack
int add_data_references(struct reloc_control *rc,
struct btrfs_key *extent_key,
struct btrfs_path *path,
struct rb_root *blocks)
static noinline_for_stack int add_data_references(struct reloc_control *rc,
const struct btrfs_key *extent_key,
struct btrfs_path *path,
struct rb_root *blocks)
{
struct btrfs_backref_walk_ctx ctx = { 0 };
struct ulist_iterator leaf_uiter;
@ -3622,7 +3651,7 @@ int prepare_to_relocate(struct reloc_control *rc)
if (ret)
return ret;
rc->create_reloc_tree = 1;
rc->create_reloc_tree = true;
set_reloc_control(rc);
trans = btrfs_join_transaction(rc->extent_root);
@ -3702,6 +3731,21 @@ restart:
struct btrfs_extent_item);
flags = btrfs_extent_flags(path->nodes[0], ei);
/*
* If we are relocating a simple quota owned extent item, we
* need to note the owner on the reloc data root so that when
* we allocate the replacement item, we can attribute it to the
* correct eventual owner (rather than the reloc data root).
*/
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE) {
struct btrfs_root *root = BTRFS_I(rc->data_inode)->root;
u64 owning_root_id = btrfs_get_extent_owner_root(fs_info,
path->nodes[0],
path->slots[0]);
root->relocation_src_root = owning_root_id;
}
if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
ret = add_tree_block(rc, &key, path, &blocks);
} else if (rc->stage == UPDATE_DATA_PTRS &&
@ -3734,7 +3778,7 @@ restart:
if (rc->stage == MOVE_DATA_EXTENTS &&
(flags & BTRFS_EXTENT_FLAG_DATA)) {
rc->found_file_extent = 1;
rc->found_file_extent = true;
ret = relocate_data_extent(rc->data_inode,
&key, &rc->cluster);
if (ret < 0) {
@ -3771,7 +3815,7 @@ restart:
err = ret;
}
rc->create_reloc_tree = 0;
rc->create_reloc_tree = false;
set_reloc_control(rc);
btrfs_backref_release_cache(&rc->backref_cache);
@ -3789,7 +3833,7 @@ restart:
merge_reloc_roots(rc);
rc->merge_reloc_tree = 0;
rc->merge_reloc_tree = false;
unset_reloc_control(rc);
btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
@ -3835,7 +3879,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
return ret;
@ -3874,9 +3918,9 @@ out:
* helper to create inode for data relocation.
* the inode is in data relocation tree and its link count is 0
*/
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
struct btrfs_block_group *group)
static noinline_for_stack struct inode *create_reloc_inode(
struct btrfs_fs_info *fs_info,
const struct btrfs_block_group *group)
{
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
@ -3971,8 +4015,9 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&rc->reloc_roots);
INIT_LIST_HEAD(&rc->dirty_subvol_roots);
btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
mapping_tree_init(&rc->reloc_root_tree);
btrfs_backref_init_cache(fs_info, &rc->backref_cache, true);
rc->reloc_root_tree.rb_root = RB_ROOT;
spin_lock_init(&rc->reloc_root_tree.lock);
extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
return rc;
}
@ -4004,7 +4049,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
block_group->start, buf);
}
static const char *stage_to_string(int stage)
static const char *stage_to_string(enum reloc_stage stage)
{
if (stage == MOVE_DATA_EXTENTS)
return "move data extents";
@ -4120,7 +4165,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
WARN_ON(ret && ret != -EAGAIN);
while (1) {
int finishes_stage;
enum reloc_stage finishes_stage;
mutex_lock(&fs_info->cleaner_mutex);
ret = relocate_block_group(rc);
@ -4303,7 +4348,7 @@ int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
goto out_unset;
}
rc->merge_reloc_tree = 1;
rc->merge_reloc_tree = true;
while (!list_empty(&reloc_roots)) {
reloc_root = list_entry(reloc_roots.next,
@ -4422,7 +4467,8 @@ int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
}
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct btrfs_root *root,
const struct extent_buffer *buf,
struct extent_buffer *cow)
{
struct btrfs_fs_info *fs_info = root->fs_info;
@ -4561,7 +4607,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
*
* Return U64_MAX if no running relocation.
*/
u64 btrfs_get_reloc_bg_bytenr(struct btrfs_fs_info *fs_info)
u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info)
{
u64 logical = U64_MAX;


@ -10,15 +10,16 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
int btrfs_recover_relocation(struct btrfs_fs_info *fs_info);
int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered);
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf,
struct btrfs_root *root,
const struct extent_buffer *buf,
struct extent_buffer *cow);
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
u64 *bytes_to_reserve);
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending);
int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_should_cancel_balance(const struct btrfs_fs_info *fs_info);
struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_should_ignore_reloc_root(struct btrfs_root *root);
u64 btrfs_get_reloc_bg_bytenr(struct btrfs_fs_info *fs_info);
bool btrfs_should_ignore_reloc_root(const struct btrfs_root *root);
u64 btrfs_get_reloc_bg_bytenr(const struct btrfs_fs_info *fs_info);
#endif


@ -51,7 +51,8 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
}
/*
* btrfs_find_root - lookup the root by the key.
* Lookup the root by the key.
*
* root: the root of the root tree
* search_key: the key to search
* path: the path we search
@ -191,7 +192,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
write_extent_buffer(l, item, ptr, sizeof(*item));
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
out:
btrfs_free_path(path);
return ret;
@ -438,7 +439,7 @@ again:
btrfs_set_root_ref_name_len(leaf, ref, name->len);
ptr = (unsigned long)(ref + 1);
write_extent_buffer(leaf, name->name, ptr, name->len);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
btrfs_release_path(path);
@ -485,7 +486,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
}
/*
* btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
* Reserve space for subvolume operation.
*
* root: the root of the parent directory
* rsv: block reservation
items: the number of items that we need to reserve for
@ -508,7 +510,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
if (btrfs_qgroup_enabled(fs_info)) {
/* One for parent inode, two for dir entries */
qgroup_num_bytes = 3 * fs_info->nodesize;
ret = btrfs_qgroup_reserve_meta_prealloc(root,


@ -3,6 +3,8 @@
#ifndef BTRFS_ROOT_TREE_H
#define BTRFS_ROOT_TREE_H
struct fscrypt_str;
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
int nitems, bool use_global_rsv);
@ -18,10 +20,8 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, const struct btrfs_key *key
int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
const struct btrfs_key *key,
struct btrfs_root_item *item);
int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_key *key,
struct btrfs_root_item *item);
int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_key *key, struct btrfs_root_item *item);
int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key,
struct btrfs_path *path, struct btrfs_root_item *root_item,
struct btrfs_key *root_key);


@ -16,7 +16,6 @@
#include "backref.h"
#include "extent_io.h"
#include "dev-replace.h"
#include "check-integrity.h"
#include "raid56.h"
#include "block-group.h"
#include "zoned.h"
@ -24,6 +23,7 @@
#include "accessors.h"
#include "file-item.h"
#include "scrub.h"
#include "raid-stripe-tree.h"
/*
* This is only the first step towards a full-featured scrub. It reads all
@ -897,7 +897,7 @@ static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
ASSERT(stripe->mirror_num >= 1);
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
stripe->logical, &mapped_len, &bioc,
NULL, NULL, 1);
NULL, NULL);
/*
* If we failed, dev will be NULL, and later detailed reports
* will just be skipped.
@ -1635,6 +1635,71 @@ static void scrub_reset_stripe(struct scrub_stripe *stripe)
}
}
static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
struct scrub_stripe *stripe)
{
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
struct btrfs_bio *bbio = NULL;
u64 stripe_len = BTRFS_STRIPE_LEN;
int mirror = stripe->mirror_num;
int i;
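	/*
	 * Hold an extra pending_io reference so in-flight bio completions
	 * cannot declare the stripe done before every bio below has been
	 * submitted; it is dropped by the dec_and_test at the end.
	 */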
atomic_inc(&stripe->pending_io);
for_each_set_bit(i, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
struct page *page = scrub_stripe_get_page(stripe, i);
unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
/* The current sector cannot be merged, submit the bio. */
if (bbio &&
((i > 0 &&
!test_bit(i - 1, &stripe->extent_sector_bitmap)) ||
bbio->bio.bi_iter.bi_size >= stripe_len)) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
btrfs_submit_bio(bbio, mirror);
bbio = NULL;
}
if (!bbio) {
struct btrfs_io_stripe io_stripe = {};
struct btrfs_io_context *bioc = NULL;
const u64 logical = stripe->logical +
(i << fs_info->sectorsize_bits);
int err;
bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
fs_info, scrub_read_endio, stripe);
bbio->bio.bi_iter.bi_sector = logical >> SECTOR_SHIFT;
io_stripe.is_scrub = true;
err = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
&stripe_len, &bioc, &io_stripe,
&mirror);
btrfs_put_bioc(bioc);
if (err) {
btrfs_bio_end_io(bbio,
errno_to_blk_status(err));
return;
}
}
__bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
}
if (bbio) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
btrfs_submit_bio(bbio, mirror);
}
if (atomic_dec_and_test(&stripe->pending_io)) {
wake_up(&stripe->io_wait);
INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
}
}
static void scrub_submit_initial_read(struct scrub_ctx *sctx,
struct scrub_stripe *stripe)
{
@ -1646,6 +1711,11 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
ASSERT(stripe->mirror_num > 0);
ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
if (btrfs_need_stripe_tree_update(fs_info, stripe->bg->flags)) {
scrub_submit_extent_sector_read(sctx, stripe);
return;
}
bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
scrub_read_endio, stripe);
@ -1952,7 +2022,7 @@ static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
&length, &bioc, NULL, NULL, 1);
&length, &bioc, NULL, NULL);
if (ret < 0) {
btrfs_put_bioc(bioc);
btrfs_bio_counter_dec(fs_info);
@ -2717,7 +2787,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
if (scrub_dev->fs_devices != fs_info->fs_devices)
gen = scrub_dev->generation;
else
gen = fs_info->last_trans_committed;
gen = btrfs_get_last_trans_committed(fs_info);
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);


@ -796,7 +796,7 @@ static int send_cmd(struct send_ctx *sctx)
put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
put_unaligned_le32(0, &hdr->crc);
crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
put_unaligned_le32(crc, &hdr->crc);
ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
@ -5669,8 +5669,8 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
hdr = (struct btrfs_cmd_header *)sctx->send_buf;
hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
hdr->crc = 0;
crc = btrfs_crc32c(0, sctx->send_buf, sctx->send_size);
crc = btrfs_crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
crc = crc32c(0, sctx->send_buf, sctx->send_size);
crc = crc32c(crc, sctx->send_buf + data_offset, disk_num_bytes);
hdr->crc = cpu_to_le32(crc);
ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
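The two-call pattern in send_encoded_extent() works because CRC32C can be continued: seeding the second call with the first call's result yields the same checksum as a single pass over the concatenated bytes. A hedged userspace demonstration, using a minimal bitwise CRC32C (Castagnoli polynomial 0x82F63B78) as an illustrative stand-in for the kernel's crc32c():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal bitwise CRC32C, modelling the kernel's crc32c() semantics. */
static uint32_t crc32c_sw(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82f63b78 : 0);
	}
	return ~crc;
}

int main(void)
{
	const char hdr[] = "cmd header";
	const char data[] = "encoded extent payload";
	char whole[sizeof(hdr) + sizeof(data)];
	uint32_t two_pass, one_pass;

	/* CRC the header, then continue over the payload... */
	two_pass = crc32c_sw(0, hdr, sizeof(hdr));
	two_pass = crc32c_sw(two_pass, data, sizeof(data));

	/* ...which must equal one pass over the concatenation. */
	memcpy(whole, hdr, sizeof(hdr));
	memcpy(whole + sizeof(hdr), data, sizeof(data));
	one_pass = crc32c_sw(0, whole, sizeof(whole));

	assert(two_pass == one_pass);
	printf("crc32c: 0x%08x\n", one_pass);
	return 0;
}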


@ -345,8 +345,10 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_space_info *data_sinfo;
u64 profile;
u64 avail;
u64 data_chunk_size;
int factor;
if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
@ -364,6 +366,36 @@ static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
*/
factor = btrfs_bg_type_to_factor(profile);
avail = div_u64(avail, factor);
if (avail == 0)
return 0;
/*
* Calculate the data_chunk_size; space_info->chunk_size is the
* "optimal" chunk size based on the fs size. However, when we actually
* allocate the chunk we will strip this down further, making it no more
* than 10% of the disk or 1G, whichever is smaller.
*/
data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
data_chunk_size = min(data_sinfo->chunk_size,
mult_perc(fs_info->fs_devices->total_rw_bytes, 10));
data_chunk_size = min_t(u64, data_chunk_size, SZ_1G);
/*
* Since data allocations immediately use block groups as part of the
* reservation, and we assume that data reservations will == actual
* usage, we could potentially overcommit and then immediately have that
* available space used by a data allocation, which could put us in a
* bind when we get close to filling the file system.
*
* To handle this, simply remove the data_chunk_size from the available
* space. If we are relatively empty this won't affect our ability to
* overcommit much, and if we're very close to full it'll keep us from
* getting into a position where we've given ourselves very little
* metadata wiggle room.
*/
if (avail <= data_chunk_size)
return 0;
avail -= data_chunk_size;
/*
* If we aren't flushing all things, let us overcommit up to
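As a worked example of the clamp above, here is a hedged userspace sketch; data_chunk_size() below is an illustrative stand-in, not the kernel helper. On an 8 GiB filesystem the 10% limit wins and roughly 0.8 GiB is withheld from the overcommit headroom, while on a 4 TiB filesystem the 1 GiB cap wins.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_1G (1024ULL * 1024 * 1024)

/* Illustrative model of the clamping done above. */
static uint64_t data_chunk_size(uint64_t optimal_chunk_size,
				uint64_t total_rw_bytes)
{
	uint64_t size = optimal_chunk_size;
	uint64_t ten_percent = total_rw_bytes / 10;

	if (size > ten_percent)
		size = ten_percent;
	if (size > SZ_1G)
		size = SZ_1G;
	return size;
}

int main(void)
{
	const uint64_t fs_sizes[] = { 8 * SZ_1G, 4096 * SZ_1G };

	for (int i = 0; i < 2; i++) {
		uint64_t chunk = data_chunk_size(10 * SZ_1G, fs_sizes[i]);

		printf("fs %4" PRIu64 " GiB -> withhold %" PRIu64 " MiB for a data chunk\n",
		       fs_sizes[i] / SZ_1G, chunk / (1024 * 1024));
	}
	return 0;
}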
@ -556,18 +588,6 @@ static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info,
return nr;
}
static inline u64 calc_delayed_refs_nr(const struct btrfs_fs_info *fs_info,
u64 to_reclaim)
{
const u64 bytes = btrfs_calc_delayed_ref_bytes(fs_info, 1);
u64 nr;
nr = div64_u64(to_reclaim, bytes);
if (!nr)
nr = 1;
return nr;
}
#define EXTENT_SIZE_PER_ITEM SZ_256K
/*
@ -749,10 +769,9 @@ static void flush_space(struct btrfs_fs_info *fs_info,
break;
}
if (state == FLUSH_DELAYED_REFS_NR)
nr = calc_delayed_refs_nr(fs_info, num_bytes);
btrfs_run_delayed_refs(trans, num_bytes);
else
nr = 0;
btrfs_run_delayed_refs(trans, nr);
btrfs_run_delayed_refs(trans, 0);
btrfs_end_transaction(trans);
break;
case ALLOC_CHUNK:
@ -978,7 +997,8 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
}
/*
* maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
* We've exhausted our flushing, start failing tickets.
*
* @fs_info - fs_info for this fs
* @space_info - the space info we were flushing
*
@ -1742,7 +1762,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* Try to reserve metadata bytes from the block_rsv's space.
*
* @fs_info: the filesystem
* @block_rsv: block_rsv we're allocating for
* @space_info: the space_info we're allocating for
* @orig_bytes: number of bytes we want
* @flush: whether or not we can flush to make our reservation
*
@ -1754,21 +1774,19 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
* space already.
*/
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush)
{
int ret;
ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush);
if (ret == -ENOSPC) {
trace_btrfs_space_reservation(fs_info, "space_info:enospc",
block_rsv->space_info->flags,
orig_bytes, 1);
space_info->flags, orig_bytes, 1);
if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
btrfs_dump_space_info(fs_info, block_rsv->space_info,
orig_bytes, 0);
btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0);
}
return ret;
}


@ -3,6 +3,7 @@
#ifndef BTRFS_SPACE_INFO_H
#define BTRFS_SPACE_INFO_H
#include <trace/events/btrfs.h>
#include "volumes.h"
/*
@ -212,7 +213,7 @@ void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
struct btrfs_space_info *space_info,
u64 orig_bytes,
enum btrfs_reserve_flush_enum flush);
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,


@ -26,6 +26,7 @@
#include <linux/ratelimit.h>
#include <linux/crc32c.h>
#include <linux/btrfs.h>
#include <linux/security.h>
#include "messages.h"
#include "delayed-inode.h"
#include "ctree.h"
@ -129,9 +130,6 @@ enum {
Opt_inode_cache, Opt_noinode_cache,
/* Debugging options */
Opt_check_integrity,
Opt_check_integrity_including_extent_data,
Opt_check_integrity_print_mask,
Opt_enospc_debug, Opt_noenospc_debug,
#ifdef CONFIG_BTRFS_DEBUG
Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all,
@ -200,9 +198,6 @@ static const match_table_t tokens = {
{Opt_recovery, "recovery"},
/* Debugging options */
{Opt_check_integrity, "check_int"},
{Opt_check_integrity_including_extent_data, "check_int_data"},
{Opt_check_integrity_print_mask, "check_int_print_mask=%u"},
{Opt_enospc_debug, "enospc_debug"},
{Opt_noenospc_debug, "noenospc_debug"},
#ifdef CONFIG_BTRFS_DEBUG
@ -707,44 +702,6 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
case Opt_skip_balance:
btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
break;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
case Opt_check_integrity_including_extent_data:
btrfs_warn(info,
"integrity checker is deprecated and will be removed in 6.7");
btrfs_info(info,
"enabling check integrity including extent data");
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY_DATA);
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
case Opt_check_integrity:
btrfs_warn(info,
"integrity checker is deprecated and will be removed in 6.7");
btrfs_info(info, "enabling check integrity");
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
case Opt_check_integrity_print_mask:
ret = match_int(&args[0], &intarg);
if (ret) {
btrfs_err(info,
"unrecognized check_integrity_print_mask value %s",
args[0].from);
goto out;
}
info->check_integrity_print_mask = intarg;
btrfs_warn(info,
"integrity checker is deprecated and will be removed in 6.7");
btrfs_info(info, "check_integrity_print_mask 0x%x",
info->check_integrity_print_mask);
break;
#else
case Opt_check_integrity_including_extent_data:
case Opt_check_integrity:
case Opt_check_integrity_print_mask:
btrfs_err(info,
"support for check_integrity* not compiled in!");
ret = -EINVAL;
goto out;
#endif
case Opt_fatal_errors:
if (strcmp(args[0].from, "panic") == 0) {
btrfs_set_opt(info->mount_opt,
@ -889,7 +846,7 @@ static int btrfs_parse_device_options(const char *options, blk_mode_t flags)
error = -ENOMEM;
goto out;
}
device = btrfs_scan_one_device(device_name, flags);
device = btrfs_scan_one_device(device_name, flags, false);
kfree(device_name);
if (IS_ERR(device)) {
error = PTR_ERR(device);
@ -1305,15 +1262,6 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",autodefrag");
if (btrfs_test_opt(info, SKIP_BALANCE))
seq_puts(seq, ",skip_balance");
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(info, CHECK_INTEGRITY_DATA))
seq_puts(seq, ",check_int_data");
else if (btrfs_test_opt(info, CHECK_INTEGRITY))
seq_puts(seq, ",check_int");
if (info->check_integrity_print_mask)
seq_printf(seq, ",check_int_print_mask=%d",
info->check_integrity_print_mask);
#endif
if (info->metadata_ratio)
seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio);
if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
@ -1484,7 +1432,12 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
goto error_fs_info;
}
device = btrfs_scan_one_device(device_name, mode);
/*
* With 'true' passed to btrfs_scan_one_device() (mount time) we expect
* either a valid device or an error.
*/
device = btrfs_scan_one_device(device_name, mode, true);
ASSERT(device != NULL);
if (IS_ERR(device)) {
mutex_unlock(&uuid_mutex);
error = PTR_ERR(device);
@ -2196,7 +2149,11 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
mutex_lock(&uuid_mutex);
device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ);
/*
* Scanning outside of mount can return NULL, which would turn
* into a 0 error code.
*/
device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
ret = PTR_ERR_OR_ZERO(device);
mutex_unlock(&uuid_mutex);
break;
@ -2210,8 +2167,12 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
break;
case BTRFS_IOC_DEVICES_READY:
mutex_lock(&uuid_mutex);
device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ);
if (IS_ERR(device)) {
/*
* Scanning outside of mount can return NULL, which would turn
* into a 0 error code.
*/
device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false);
if (IS_ERR_OR_NULL(device)) {
mutex_unlock(&uuid_mutex);
ret = PTR_ERR(device);
break;
@ -2256,6 +2217,7 @@ static int check_dev_super(struct btrfs_device *dev)
{
struct btrfs_fs_info *fs_info = dev->fs_info;
struct btrfs_super_block *sb;
u64 last_trans;
u16 csum_type;
int ret = 0;
@ -2291,10 +2253,10 @@ static int check_dev_super(struct btrfs_device *dev)
if (ret < 0)
goto out;
if (btrfs_super_generation(sb) != fs_info->last_trans_committed) {
last_trans = btrfs_get_last_trans_committed(fs_info);
if (btrfs_super_generation(sb) != last_trans) {
btrfs_err(fs_info, "transid mismatch, has %llu expect %llu",
btrfs_super_generation(sb),
fs_info->last_trans_committed);
btrfs_super_generation(sb), last_trans);
ret = -EUCLEAN;
goto out;
}
@ -2404,9 +2366,6 @@ static int __init btrfs_print_mod_info(void)
#ifdef CONFIG_BTRFS_ASSERT
", assert=on"
#endif
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
", integrity-checker=on"
#endif
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
", ref-verify=on"
#endif


@ -291,12 +291,15 @@ BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
BTRFS_FEAT_ATTR_COMPAT_RO(block_group_tree, BLOCK_GROUP_TREE);
BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
BTRFS_FEAT_ATTR_INCOMPAT(simple_quota, SIMPLE_QUOTA);
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
#endif
#ifdef CONFIG_BTRFS_DEBUG
/* Remove once support for extent tree v2 is feature complete */
BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2);
/* Remove once support for raid stripe tree is feature complete. */
BTRFS_FEAT_ATTR_INCOMPAT(raid_stripe_tree, RAID_STRIPE_TREE);
#endif
#ifdef CONFIG_FS_VERITY
BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY);
@ -322,11 +325,13 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(free_space_tree),
BTRFS_FEAT_ATTR_PTR(raid1c34),
BTRFS_FEAT_ATTR_PTR(block_group_tree),
BTRFS_FEAT_ATTR_PTR(simple_quota),
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_PTR(zoned),
#endif
#ifdef CONFIG_BTRFS_DEBUG
BTRFS_FEAT_ATTR_PTR(extent_tree_v2),
BTRFS_FEAT_ATTR_PTR(raid_stripe_tree),
#endif
#ifdef CONFIG_FS_VERITY
BTRFS_FEAT_ATTR_PTR(verity),
@ -420,6 +425,13 @@ static ssize_t acl_show(struct kobject *kobj, struct kobj_attribute *a, char *bu
}
BTRFS_ATTR(static_feature, acl, acl_show);
static ssize_t temp_fsid_supported_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
return sysfs_emit(buf, "0\n");
}
BTRFS_ATTR(static_feature, temp_fsid, temp_fsid_supported_show);
/*
* Features which only depend on kernel version.
*
@ -433,6 +445,7 @@ static struct attribute *btrfs_supported_static_feature_attrs[] = {
BTRFS_ATTR_PTR(static_feature, send_stream_version),
BTRFS_ATTR_PTR(static_feature, supported_rescue_options),
BTRFS_ATTR_PTR(static_feature, supported_sectorsizes),
BTRFS_ATTR_PTR(static_feature, temp_fsid),
NULL
};
@ -1196,10 +1209,19 @@ static ssize_t btrfs_generation_show(struct kobject *kobj,
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
return sysfs_emit(buf, "%llu\n", fs_info->generation);
return sysfs_emit(buf, "%llu\n", btrfs_get_fs_generation(fs_info));
}
BTRFS_ATTR(, generation, btrfs_generation_show);
static ssize_t btrfs_temp_fsid_show(struct kobject *kobj,
struct kobj_attribute *a, char *buf)
{
struct btrfs_fs_info *fs_info = to_fs_info(kobj);
return sysfs_emit(buf, "%d\n", fs_info->fs_devices->temp_fsid);
}
BTRFS_ATTR(, temp_fsid, btrfs_temp_fsid_show);
static const char * const btrfs_read_policy_name[] = { "pid" };
static ssize_t btrfs_read_policy_show(struct kobject *kobj,
@ -1302,6 +1324,7 @@ static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, read_policy),
BTRFS_ATTR_PTR(, bg_reclaim_threshold),
BTRFS_ATTR_PTR(, commit_stats),
BTRFS_ATTR_PTR(, temp_fsid),
NULL,
};
@ -2086,6 +2109,33 @@ static ssize_t qgroup_enabled_show(struct kobject *qgroups_kobj,
}
BTRFS_ATTR(qgroups, enabled, qgroup_enabled_show);
static ssize_t qgroup_mode_show(struct kobject *qgroups_kobj,
struct kobj_attribute *a,
char *buf)
{
struct btrfs_fs_info *fs_info = to_fs_info(qgroups_kobj->parent);
ssize_t ret = 0;
spin_lock(&fs_info->qgroup_lock);
ASSERT(btrfs_qgroup_enabled(fs_info));
switch (btrfs_qgroup_mode(fs_info)) {
case BTRFS_QGROUP_MODE_FULL:
ret = sysfs_emit(buf, "qgroup\n");
break;
case BTRFS_QGROUP_MODE_SIMPLE:
ret = sysfs_emit(buf, "squota\n");
break;
default:
btrfs_warn(fs_info, "unexpected qgroup mode %d\n",
btrfs_qgroup_mode(fs_info));
break;
}
spin_unlock(&fs_info->qgroup_lock);
return ret;
}
BTRFS_ATTR(qgroups, mode, qgroup_mode_show);
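The new mode attribute reads like any other btrfs sysfs file; note that the qgroups directory only appears once quotas are enabled. A minimal hedged userspace reader, where the fsid in the path is a placeholder to substitute for the mounted filesystem:

#include <stdio.h>

int main(void)
{
	/* Placeholder fsid, substitute the mounted filesystem's UUID. */
	const char *path =
		"/sys/fs/btrfs/00000000-0000-0000-0000-000000000000/qgroups/mode";
	char mode[32] = { 0 };
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(mode, sizeof(mode), f))
		printf("qgroup mode: %s", mode);	/* "qgroup" or "squota" */
	fclose(f);
	return 0;
}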
static ssize_t qgroup_inconsistent_show(struct kobject *qgroups_kobj,
struct kobj_attribute *a,
char *buf)
@ -2148,6 +2198,7 @@ static struct attribute *qgroups_attrs[] = {
BTRFS_ATTR_PTR(qgroups, enabled),
BTRFS_ATTR_PTR(qgroups, inconsistent),
BTRFS_ATTR_PTR(qgroups, drop_subtree_threshold),
BTRFS_ATTR_PTR(qgroups, mode),
NULL
};
ATTRIBUTE_GROUPS(qgroups);


@ -61,7 +61,11 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
key.type = BTRFS_EXTENT_CSUM_KEY;
key.offset = 0;
btrfs_setup_item_for_insert(root, path, &key, value_len);
/*
* Passing a NULL trans handle is fine here, we have a dummy root eb
* and the tree is a single node (level 0).
*/
btrfs_setup_item_for_insert(NULL, root, path, &key, value_len);
write_extent_buffer(eb, value, btrfs_item_ptr_offset(eb, 0),
value_len);


@ -34,7 +34,11 @@ static void insert_extent(struct btrfs_root *root, u64 start, u64 len,
key.type = BTRFS_EXTENT_DATA_KEY;
key.offset = start;
btrfs_setup_item_for_insert(root, &path, &key, value_len);
/*
* Passing a NULL trans handle is fine here, we have a dummy root eb
* and the tree is a single node (level 0).
*/
btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi, 1);
btrfs_set_file_extent_type(leaf, fi, type);
@ -64,7 +68,11 @@ static void insert_inode_item_key(struct btrfs_root *root)
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
btrfs_setup_item_for_insert(root, &path, &key, value_len);
/*
* Passing a NULL trans handle is fine here, we have a dummy root eb
* and the tree is a single node (level 0).
*/
btrfs_setup_item_for_insert(NULL, root, &path, &key, value_len);
}
/*


@ -386,7 +386,7 @@ loop:
IO_TREE_TRANS_DIRTY_PAGES);
extent_io_tree_init(fs_info, &cur_trans->pinned_extents,
IO_TREE_FS_PINNED_EXTENTS);
fs_info->generation++;
btrfs_set_fs_generation(fs_info, fs_info->generation + 1);
cur_trans->transid = fs_info->generation;
fs_info->running_transaction = cur_trans;
cur_trans->aborted = 0;
@ -561,6 +561,69 @@ static inline bool need_reserve_reloc_root(struct btrfs_root *root)
return true;
}
static int btrfs_reserve_trans_metadata(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush,
u64 num_bytes,
u64 *delayed_refs_bytes)
{
struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
struct btrfs_space_info *si = fs_info->trans_block_rsv.space_info;
u64 extra_delayed_refs_bytes = 0;
u64 bytes;
int ret;
/*
* If there's a gap between the size of the delayed refs reserve and
* its reserved space, then some tasks have added delayed refs or bumped
* its size otherwise (due to block group creation or removal, or block
* group item update). Also try to allocate that gap in order to prevent
* using (and possibly abusing) the global reserve when committing the
* transaction.
*/
if (flush == BTRFS_RESERVE_FLUSH_ALL &&
!btrfs_block_rsv_full(delayed_refs_rsv)) {
spin_lock(&delayed_refs_rsv->lock);
if (delayed_refs_rsv->size > delayed_refs_rsv->reserved)
extra_delayed_refs_bytes = delayed_refs_rsv->size -
delayed_refs_rsv->reserved;
spin_unlock(&delayed_refs_rsv->lock);
}
bytes = num_bytes + *delayed_refs_bytes + extra_delayed_refs_bytes;
/*
* We want to reserve all the bytes we may need all at once, so we only
* do 1 enospc flushing cycle per transaction start.
*/
ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
if (ret == 0) {
if (extra_delayed_refs_bytes > 0)
btrfs_migrate_to_delayed_refs_rsv(fs_info,
extra_delayed_refs_bytes);
return 0;
}
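	/*
	 * Filling the reserve gap above was opportunistic, retry with only
	 * the bytes this transaction actually needs.
	 */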
if (extra_delayed_refs_bytes > 0) {
bytes -= extra_delayed_refs_bytes;
ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
if (ret == 0)
return 0;
}
/*
* If we are an emergency flush, which can steal from the global block
* reserve, then attempt to not reserve space for the delayed refs, as
* we will consume space for them from the global block reserve.
*/
if (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
bytes -= *delayed_refs_bytes;
*delayed_refs_bytes = 0;
ret = btrfs_reserve_metadata_bytes(fs_info, si, bytes, flush);
}
return ret;
}
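The fallback order implemented above reduces to three attempts. Here is a hedged userspace model (all names and the capacity stub are illustrative, not kernel APIs): first try everything including the optional gap, then drop the gap, and finally, for stealing flushes, drop the delayed refs portion so it can be taken from the global reserve instead.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stub: pretend only requests up to 'avail' bytes can be reserved. */
static uint64_t avail = 100;

static int reserve(uint64_t bytes)
{
	return bytes <= avail ? 0 : -ENOSPC;
}

/* Model of the three-step fallback in btrfs_reserve_trans_metadata(). */
static int reserve_trans_metadata(uint64_t num_bytes,
				  uint64_t *delayed_refs_bytes,
				  uint64_t extra_gap, int can_steal)
{
	/* 1) Everything at once, including the optional reserve gap. */
	if (reserve(num_bytes + *delayed_refs_bytes + extra_gap) == 0)
		return 0;
	/* 2) Drop the optional gap, keep the mandatory delayed refs part. */
	if (extra_gap && reserve(num_bytes + *delayed_refs_bytes) == 0)
		return 0;
	/* 3) Stealing flush: let delayed refs use the global reserve. */
	if (can_steal) {
		*delayed_refs_bytes = 0;
		return reserve(num_bytes);
	}
	return -ENOSPC;
}

int main(void)
{
	uint64_t delayed_refs = 30;
	int ret = reserve_trans_metadata(80, &delayed_refs, 40, 1);

	/* Steps 1 (150 bytes) and 2 (110) fail, step 3 (80) succeeds. */
	printf("ret=%d delayed_refs_bytes=%llu\n",
	       ret, (unsigned long long)delayed_refs);
	return ret;
}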
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, unsigned int num_items,
unsigned int type, enum btrfs_reserve_flush_enum flush,
@ -568,10 +631,12 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
u64 num_bytes = 0;
u64 qgroup_reserved = 0;
u64 delayed_refs_bytes = 0;
bool reloc_reserved = false;
bool do_chunk_alloc = false;
int ret;
@ -594,9 +659,6 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
* the appropriate flushing if need be.
*/
if (num_items && root != fs_info->chunk_root) {
struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
u64 delayed_refs_bytes = 0;
qgroup_reserved = num_items * fs_info->nodesize;
/*
* Use prealloc for now, as there might be a currently running
@ -608,20 +670,16 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
if (ret)
return ERR_PTR(ret);
/*
* We want to reserve all the bytes we may need all at once, so
* we only do 1 enospc flushing cycle per transaction start. We
* accomplish this by simply assuming we'll do num_items worth
* of delayed refs updates in this trans handle, and refill that
* amount for whatever is missing in the reserve.
*/
num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
if (flush == BTRFS_RESERVE_FLUSH_ALL &&
!btrfs_block_rsv_full(delayed_refs_rsv)) {
delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info,
num_items);
num_bytes += delayed_refs_bytes;
}
/*
* If we plan to insert/update/delete "num_items" from a btree,
* we will also generate delayed refs for extent buffers in the
* respective btree paths, so reserve space for the delayed refs
* that will be generated by the caller as it modifies btrees.
* Try to reserve them to avoid excessive use of the global
* block reserve.
*/
delayed_refs_bytes = btrfs_calc_delayed_ref_bytes(fs_info, num_items);
/*
* Do the reservation for the relocation root creation
@ -631,16 +689,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
reloc_reserved = true;
}
ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
ret = btrfs_reserve_trans_metadata(fs_info, flush, num_bytes,
&delayed_refs_bytes);
if (ret)
goto reserve_fail;
if (delayed_refs_bytes) {
btrfs_migrate_to_delayed_refs_rsv(fs_info, delayed_refs_bytes);
num_bytes -= delayed_refs_bytes;
}
btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
if (rsv->space_info->force_alloc)
btrfs_block_rsv_add_bytes(trans_rsv, num_bytes, true);
if (trans_rsv->space_info->force_alloc)
do_chunk_alloc = true;
} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
!btrfs_block_rsv_full(delayed_refs_rsv)) {
@ -700,6 +756,7 @@ again:
h->type = type;
INIT_LIST_HEAD(&h->new_bgs);
btrfs_init_metadata_block_rsv(fs_info, &h->delayed_rsv, BTRFS_BLOCK_RSV_DELOPS);
smp_mb();
if (cur_trans->state >= TRANS_STATE_COMMIT_START &&
@ -712,8 +769,17 @@ again:
if (num_bytes) {
trace_btrfs_space_reservation(fs_info, "transaction",
h->transid, num_bytes, 1);
h->block_rsv = &fs_info->trans_block_rsv;
h->block_rsv = trans_rsv;
h->bytes_reserved = num_bytes;
if (delayed_refs_bytes > 0) {
trace_btrfs_space_reservation(fs_info,
"local_delayed_refs_rsv",
h->transid,
delayed_refs_bytes, 1);
h->delayed_refs_bytes_reserved = delayed_refs_bytes;
btrfs_block_rsv_add_bytes(&h->delayed_rsv, delayed_refs_bytes, true);
delayed_refs_bytes = 0;
}
h->reloc_reserved = reloc_reserved;
}
@ -769,8 +835,10 @@ join_fail:
kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
if (num_bytes)
btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
num_bytes, NULL);
btrfs_block_rsv_release(fs_info, trans_rsv, num_bytes, NULL);
if (delayed_refs_bytes)
btrfs_space_info_free_bytes_may_use(fs_info, trans_rsv->space_info,
delayed_refs_bytes);
reserve_fail:
btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
return ERR_PTR(ret);
@ -817,7 +885,7 @@ struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *roo
}
/*
* btrfs_attach_transaction() - catch the running transaction
* Catch the running transaction.
*
* It is used when we want to commit the current transaction, but
* don't want to start a new one.
@ -836,7 +904,7 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
}
/*
* btrfs_attach_transaction_barrier() - catch the running transaction
* Catch the running transaction.
*
* It is similar to the above function; the difference is that this one
* will wait for all the inactive transactions until they fully
@ -912,7 +980,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
int ret = 0;
if (transid) {
if (transid <= fs_info->last_trans_committed)
if (transid <= btrfs_get_last_trans_committed(fs_info))
goto out;
/* find specified transaction */
@ -936,7 +1004,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
* raced with btrfs_commit_transaction
*/
if (!cur_trans) {
if (transid > fs_info->last_trans_committed)
if (transid > btrfs_get_last_trans_committed(fs_info))
ret = -EINVAL;
goto out;
}
@ -991,11 +1059,14 @@ static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
if (!trans->block_rsv) {
ASSERT(!trans->bytes_reserved);
ASSERT(!trans->delayed_refs_bytes_reserved);
return;
}
if (!trans->bytes_reserved)
if (!trans->bytes_reserved) {
ASSERT(!trans->delayed_refs_bytes_reserved);
return;
}
ASSERT(trans->block_rsv == &fs_info->trans_block_rsv);
trace_btrfs_space_reservation(fs_info, "transaction",
@ -1003,6 +1074,16 @@ static void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans)
btrfs_block_rsv_release(fs_info, trans->block_rsv,
trans->bytes_reserved, NULL);
trans->bytes_reserved = 0;
if (!trans->delayed_refs_bytes_reserved)
return;
trace_btrfs_space_reservation(fs_info, "local_delayed_refs_rsv",
trans->transid,
trans->delayed_refs_bytes_reserved, 0);
btrfs_block_rsv_release(fs_info, &trans->delayed_rsv,
trans->delayed_refs_bytes_reserved, NULL);
trans->delayed_refs_bytes_reserved = 0;
}
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
@ -1334,7 +1415,7 @@ again:
}
/* Now flush any delayed refs generated by updating all of the roots */
ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
return ret;
@ -1349,7 +1430,7 @@ again:
* so we want to keep this flushing in this loop to make sure
* everything gets run.
*/
ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
return ret;
}
@ -1483,45 +1564,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
return 0;
}
/*
* defrag a given btree.
* Every leaf in the btree is read and defragged.
*/
int btrfs_defrag_root(struct btrfs_root *root)
{
struct btrfs_fs_info *info = root->fs_info;
struct btrfs_trans_handle *trans;
int ret;
if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
return 0;
while (1) {
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
break;
}
ret = btrfs_defrag_leaves(trans, root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(info);
cond_resched();
if (btrfs_fs_closing(info) || ret != -EAGAIN)
break;
if (btrfs_defrag_cancelled(info)) {
btrfs_debug(info, "defrag_root cancelled");
ret = -EAGAIN;
break;
}
}
clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
return ret;
}
/*
* Do all special snapshot related qgroup dirty hack.
*
@ -1539,11 +1581,10 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
int ret;
/*
* Save some performance in the case that qgroups are not
* enabled. If this check races with the ioctl, rescan will
* kick in anyway.
* Save some performance in the case that qgroups are not enabled. If
* this check races with the ioctl, rescan will kick in anyway.
*/
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
if (!btrfs_qgroup_full_accounting(fs_info))
return 0;
/*
@ -1567,7 +1608,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
* for now flush the delayed refs to narrow the race window where the
* qgroup counters could end up wrong.
*/
ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret) {
btrfs_abort_transaction(trans, ret);
return ret;
@ -1582,7 +1623,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
/* Now qgroup are all updated, we can inherit it to new qgroups */
ret = btrfs_qgroup_inherit(trans, src->root_key.objectid, dst_objectid,
inherit);
parent->root_key.objectid, inherit);
if (ret < 0)
goto out;
@ -1732,6 +1773,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
ret = btrfs_create_qgroup(trans, objectid);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
}
/*
* pull in the delayed directory update
* and the delayed inode item
@ -1843,8 +1890,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* To co-operate with that hack, we do the hack again here. Otherwise the
* snapshot would be greatly slowed down by a subtree qgroup rescan.
*/
ret = qgroup_account_snapshot(trans, root, parent_root,
pending->inherit, objectid);
if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_FULL)
ret = qgroup_account_snapshot(trans, root, parent_root,
pending->inherit, objectid);
else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE)
ret = btrfs_qgroup_inherit(trans, root->root_key.objectid, objectid,
parent_root->root_key.objectid, pending->inherit);
if (ret < 0)
goto fail;
@ -1862,7 +1913,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
fname.disk_name.len * 2);
inode_set_mtime_to_ts(parent_inode,
inode_set_ctime_current(parent_inode));
ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@ -2085,7 +2136,7 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
struct btrfs_block_group *block_group, *tmp;
list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
list_del_init(&block_group->bg_list);
}
}
@ -2404,7 +2455,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (ret)
goto unlock_reloc;
ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, U64_MAX);
if (ret)
goto unlock_reloc;
@ -2537,7 +2588,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
btrfs_clear_space_info_full(fs_info);
fs_info->last_trans_committed = cur_trans->transid;
btrfs_set_last_trans_committed(fs_info, cur_trans->transid);
/*
* We needn't acquire the lock here because there is no other task
* which can change it.
@ -2655,18 +2706,18 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info)
*/
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
const char *function,
unsigned int line, int errno, bool first_hit)
unsigned int line, int error, bool first_hit)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
WRITE_ONCE(trans->aborted, errno);
WRITE_ONCE(trans->transaction->aborted, errno);
if (first_hit && errno == -ENOSPC)
WRITE_ONCE(trans->aborted, error);
WRITE_ONCE(trans->transaction->aborted, error);
if (first_hit && error == -ENOSPC)
btrfs_dump_space_info_for_trans_abort(fs_info);
/* Wake up anybody who may be waiting on this transaction */
wake_up(&fs_info->transaction_wait);
wake_up(&fs_info->transaction_blocked_wait);
__btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
__btrfs_handle_fs_error(fs_info, function, line, error, NULL);
}
int __init btrfs_transaction_init(void)


@ -118,8 +118,10 @@ enum {
struct btrfs_trans_handle {
u64 transid;
u64 bytes_reserved;
u64 delayed_refs_bytes_reserved;
u64 chunk_bytes_reserved;
unsigned long delayed_ref_updates;
unsigned long delayed_ref_csum_deletions;
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
@ -139,6 +141,7 @@ struct btrfs_trans_handle {
bool in_fsync;
struct btrfs_fs_info *fs_info;
struct list_head new_bgs;
struct btrfs_block_rsv delayed_rsv;
};
/*
@ -172,7 +175,7 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
{
spin_lock(&inode->lock);
inode->last_trans = trans->transaction->transid;
inode->last_sub_trans = inode->root->log_transid;
inode->last_sub_trans = btrfs_get_root_log_transid(inode->root);
inode->last_log_commit = inode->last_sub_trans - 1;
spin_unlock(&inode->lock);
}
@ -200,32 +203,32 @@ static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
delayed_refs->qgroup_to_skip = 0;
}
bool __cold abort_should_print_stack(int errno);
bool __cold abort_should_print_stack(int error);
/*
* Call btrfs_abort_transaction as early as possible when an error condition is
* detected, that way the exact stack trace is reported for some errors.
*/
#define btrfs_abort_transaction(trans, errno) \
#define btrfs_abort_transaction(trans, error) \
do { \
bool first = false; \
/* Report first abort since mount */ \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
&((trans)->fs_info->fs_state))) { \
first = true; \
if (WARN(abort_should_print_stack(errno), \
if (WARN(abort_should_print_stack(error), \
KERN_ERR \
"BTRFS: Transaction aborted (error %d)\n", \
(errno))) { \
(error))) { \
/* Stack trace printed. */ \
} else { \
btrfs_err((trans)->fs_info, \
"Transaction aborted (error %d)", \
(errno)); \
(error)); \
} \
} \
__btrfs_abort_transaction((trans), __func__, \
__LINE__, (errno), first); \
__LINE__, (error), first); \
} while (0)
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
@ -243,7 +246,6 @@ struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
@ -264,7 +266,7 @@ void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
const char *function,
unsigned int line, int errno, bool first_hit);
unsigned int line, int error, bool first_hit);
int __init btrfs_transaction_init(void);
void __cold btrfs_transaction_exit(void);


@ -29,6 +29,8 @@
#include "accessors.h"
#include "file-item.h"
#include "inode-item.h"
#include "dir-item.h"
#include "raid-stripe-tree.h"
/*
* Error messages should follow this format:
@ -1465,6 +1467,9 @@ static int check_extent_item(struct extent_buffer *leaf,
}
inline_refs += btrfs_shared_data_ref_count(leaf, sref);
break;
case BTRFS_EXTENT_OWNER_REF_KEY:
WARN_ON(!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
break;
default:
extent_err(leaf, slot, "unknown inline ref type: %u",
inline_type);
@ -1631,6 +1636,44 @@ static int check_inode_ref(struct extent_buffer *leaf,
return 0;
}
static int check_raid_stripe_extent(const struct extent_buffer *leaf,
const struct btrfs_key *key, int slot)
{
struct btrfs_stripe_extent *stripe_extent =
btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent);
if (unlikely(!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize))) {
generic_err(leaf, slot,
"invalid key objectid for raid stripe extent, have %llu expect aligned to %u",
key->objectid, leaf->fs_info->sectorsize);
return -EUCLEAN;
}
if (unlikely(!btrfs_fs_incompat(leaf->fs_info, RAID_STRIPE_TREE))) {
generic_err(leaf, slot,
"RAID_STRIPE_EXTENT present but RAID_STRIPE_TREE incompat bit unset");
return -EUCLEAN;
}
switch (btrfs_stripe_extent_encoding(leaf, stripe_extent)) {
case BTRFS_STRIPE_RAID0:
case BTRFS_STRIPE_RAID1:
case BTRFS_STRIPE_DUP:
case BTRFS_STRIPE_RAID10:
case BTRFS_STRIPE_RAID5:
case BTRFS_STRIPE_RAID6:
case BTRFS_STRIPE_RAID1C3:
case BTRFS_STRIPE_RAID1C4:
break;
default:
generic_err(leaf, slot, "invalid raid stripe encoding %u",
btrfs_stripe_extent_encoding(leaf, stripe_extent));
return -EUCLEAN;
}
return 0;
}
/*
* Common point to switch the item-specific validation.
*/
@ -1685,6 +1728,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
case BTRFS_EXTENT_DATA_REF_KEY:
ret = check_extent_data_ref(leaf, key, slot);
break;
case BTRFS_RAID_STRIPE_KEY:
ret = check_raid_stripe_extent(leaf, key, slot);
break;
}
if (ret)
@ -2005,7 +2051,7 @@ int btrfs_verify_level_key(struct extent_buffer *eb, int level,
* So we only check tree blocks which are read from disk, whose
* generation <= fs_info->last_trans_committed.
*/
if (btrfs_header_generation(eb) > fs_info->last_trans_committed)
if (btrfs_header_generation(eb) > btrfs_get_last_trans_committed(fs_info))
return 0;
/* We have @first_key, so this @eb must have at least one item */


@ -347,8 +347,7 @@ static int process_one_buffer(struct btrfs_root *log,
}
if (wc->pin) {
ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
eb->len);
ret = btrfs_pin_extent_for_log_replay(wc->trans, eb);
if (ret)
return ret;
@ -504,9 +503,9 @@ insert:
found_size = btrfs_item_size(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
btrfs_truncate_item(path, item_size, 1);
btrfs_truncate_item(trans, path, item_size, 1);
else if (found_size < item_size)
btrfs_extend_item(path, item_size - found_size);
btrfs_extend_item(trans, path, item_size - found_size);
} else if (ret) {
return ret;
}
@ -574,7 +573,7 @@ insert:
}
}
no_copy:
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
return 0;
}
@ -767,7 +766,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
} else if (ret == 0) {
btrfs_init_generic_ref(&ref,
BTRFS_ADD_DELAYED_REF,
ins.objectid, ins.offset, 0);
ins.objectid, ins.offset, 0,
root->root_key.objectid);
btrfs_init_data_ref(&ref,
root->root_key.objectid,
key->objectid, offset, 0, false);
@ -890,7 +890,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
update_inode:
btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
out:
iput(inode);
return ret;
@ -1445,7 +1445,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
if (ret)
goto out;
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret)
goto out;
}
@ -1483,8 +1483,7 @@ out:
return ret;
}
static int count_inode_extrefs(struct btrfs_root *root,
struct btrfs_inode *inode, struct btrfs_path *path)
static int count_inode_extrefs(struct btrfs_inode *inode, struct btrfs_path *path)
{
int ret = 0;
int name_len;
@ -1498,8 +1497,8 @@ static int count_inode_extrefs(struct btrfs_root *root,
struct extent_buffer *leaf;
while (1) {
ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
&extref, &offset);
ret = btrfs_find_one_extref(inode->root, inode_objectid, offset,
path, &extref, &offset);
if (ret)
break;
@ -1527,8 +1526,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
return nlink;
}
static int count_inode_refs(struct btrfs_root *root,
struct btrfs_inode *inode, struct btrfs_path *path)
static int count_inode_refs(struct btrfs_inode *inode, struct btrfs_path *path)
{
int ret;
struct btrfs_key key;
@ -1543,7 +1541,7 @@ static int count_inode_refs(struct btrfs_root *root,
key.offset = (u64)-1;
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
ret = btrfs_search_slot(NULL, inode->root, &key, path, 0, 0);
if (ret < 0)
break;
if (ret > 0) {
@ -1595,9 +1593,9 @@ process_slot:
* will free the inode.
*/
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret;
u64 nlink = 0;
@ -1607,13 +1605,13 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
ret = count_inode_refs(root, BTRFS_I(inode), path);
ret = count_inode_refs(BTRFS_I(inode), path);
if (ret < 0)
goto out;
nlink = ret;
ret = count_inode_extrefs(root, BTRFS_I(inode), path);
ret = count_inode_extrefs(BTRFS_I(inode), path);
if (ret < 0)
goto out;
@ -1623,7 +1621,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (nlink != inode->i_nlink) {
set_nlink(inode, nlink);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret)
goto out;
}
@ -1685,7 +1683,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
break;
}
ret = fixup_inode_link_count(trans, root, inode);
ret = fixup_inode_link_count(trans, inode);
iput(inode);
if (ret)
break;
@ -1732,7 +1730,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
set_nlink(inode, 1);
else
inc_nlink(inode);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
} else if (ret == -EEXIST) {
ret = 0;
}
@ -1939,7 +1937,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
out:
if (!ret && update_size) {
btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name.len * 2);
ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
ret = btrfs_update_inode(trans, BTRFS_I(dir));
}
kfree(name.name);
iput(dir);
@ -2483,7 +2481,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
drop_args.bytes_found);
/* Update the inode's nbytes. */
ret = btrfs_update_inode(wc->trans,
root, BTRFS_I(inode));
BTRFS_I(inode));
}
iput(inode);
if (ret)
@ -2574,7 +2572,7 @@ static int clean_log_buffer(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(eb);
if (trans) {
ret = btrfs_pin_reserved_extent(trans, eb->start, eb->len);
ret = btrfs_pin_reserved_extent(trans, eb);
if (ret)
return ret;
btrfs_redirty_list_add(trans->transaction, eb);
@ -2848,10 +2846,9 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
}
/*
* btrfs_sync_log does sends a given tree log down to the disk and
* updates the super blocks to record it. When this call is done,
* you know that any inodes previously logged are safely on disk only
* if it returns 0.
* Sends a given tree log down to the disk and updates the super blocks to
* record it. When this call is done, you know that any inodes previously
* logged are safely on disk only if it returns 0.
*
* Any other return value means you need to call btrfs_commit_transaction.
* Some of the edge cases for fsyncing directories that have had unlinks
@ -2961,7 +2958,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_root_node(&log->root_item, log->node);
memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
root->log_transid++;
btrfs_set_root_log_transid(root, root->log_transid + 1);
log->log_transid = root->log_transid;
root->log_start_pid = 0;
/*
@ -2999,9 +2996,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
*/
ret = update_log_root(trans, log, &new_root_item);
if (ret) {
if (!list_empty(&root_log_ctx.list))
list_del_init(&root_log_ctx.list);
list_del_init(&root_log_ctx.list);
blk_finish_plug(&plug);
btrfs_set_log_full_commit(trans);
if (ret != -ENOSPC)
@ -3021,7 +3016,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out;
}
index2 = root_log_ctx.log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
ret = btrfs_wait_tree_log_extents(log, mark);
@ -3136,8 +3130,8 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* someone else already started it. We use <= and not < because the
* first log transaction has an ID of 0.
*/
ASSERT(root->last_log_commit <= log_transid);
root->last_log_commit = log_transid;
ASSERT(btrfs_get_root_last_log_commit(root) <= log_transid);
btrfs_set_root_last_log_commit(root, log_transid);
out_wake_log_root:
mutex_lock(&log_root_tree->log_mutex);
@ -3211,8 +3205,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
}
}
clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
extent_io_tree_release(&log->dirty_log_pages);
extent_io_tree_release(&log->log_csum_range);
btrfs_put_root(log);
@ -3530,7 +3523,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
last_offset = max(last_offset, curr_end);
}
btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_mark_buffer_dirty(trans, path->nodes[0]);
btrfs_release_path(path);
return 0;
}
@ -4488,7 +4481,7 @@ copy_item:
dst_index++;
}
btrfs_mark_buffer_dirty(dst_path->nodes[0]);
btrfs_mark_buffer_dirty(trans, dst_path->nodes[0]);
btrfs_release_path(dst_path);
out:
kfree(ins_data);
@ -4693,7 +4686,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, &fi,
btrfs_item_ptr_offset(leaf, path->slots[0]),
sizeof(fi));
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
btrfs_release_path(path);
@ -4921,12 +4914,12 @@ process:
set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
spin_lock_irq(&inode->ordered_tree.lock);
spin_lock_irq(&inode->ordered_tree_lock);
if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
atomic_inc(&trans->transaction->pending_ordered);
}
spin_unlock_irq(&inode->ordered_tree.lock);
spin_unlock_irq(&inode->ordered_tree_lock);
}
btrfs_put_ordered_extent(ordered);
}
@ -7204,9 +7197,7 @@ again:
* each subsequent pass.
*/
if (ret == -ENOENT)
ret = btrfs_pin_extent_for_log_replay(trans,
log->node->start,
log->node->len);
ret = btrfs_pin_extent_for_log_replay(trans, log->node);
btrfs_put_root(log);
if (!ret)


@ -223,7 +223,8 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
}
/*
* ulist_del - delete one node from ulist
* Delete one node from ulist.
*
* @ulist: ulist to remove node from
* @val: value to delete
* @aux: aux to delete


@ -124,7 +124,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
* An item with that type already exists.
* Extend the item and store the new subid at the end.
*/
btrfs_extend_item(path, sizeof(subid_le));
btrfs_extend_item(trans, path, sizeof(subid_le));
eb = path->nodes[0];
slot = path->slots[0];
offset = btrfs_item_ptr_offset(eb, slot);
@ -139,7 +139,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
ret = 0;
subid_le = cpu_to_le64(subid_cpu);
write_extent_buffer(eb, &subid_le, offset, sizeof(subid_le));
btrfs_mark_buffer_dirty(eb);
btrfs_mark_buffer_dirty(trans, eb);
out:
btrfs_free_path(path);
@ -221,7 +221,7 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type,
move_src = offset + sizeof(subid);
move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
memmove_extent_buffer(eb, move_dst, move_src, move_len);
btrfs_truncate_item(path, item_size - sizeof(subid), 1);
btrfs_truncate_item(trans, path, item_size - sizeof(subid), 1);
out:
btrfs_free_path(path);


@ -487,7 +487,7 @@ static int rollback_verity(struct btrfs_inode *inode)
}
inode->ro_flags &= ~BTRFS_INODE_RO_VERITY;
btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
ret = btrfs_update_inode(trans, root, inode);
ret = btrfs_update_inode(trans, inode);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@ -554,7 +554,7 @@ static int finish_verity(struct btrfs_inode *inode, const void *desc,
}
inode->ro_flags |= BTRFS_INODE_RO_VERITY;
btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
ret = btrfs_update_inode(trans, root, inode);
ret = btrfs_update_inode(trans, inode);
if (ret)
goto end_trans;
ret = del_orphan(trans, inode);


@ -35,6 +35,7 @@
#include "relocation.h"
#include "scrub.h"
#include "super.h"
#include "raid-stripe-tree.h"
#define BTRFS_BLOCK_GROUP_STRIPE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID10 | \
@ -357,21 +358,19 @@ struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
}
/*
* alloc_fs_devices - allocate struct btrfs_fs_devices
* @fsid: if not NULL, copy the UUID to fs_devices::fsid
* @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
* Allocate new btrfs_fs_devices structure identified by a fsid.
*
* @fsid: if not NULL, copy the UUID to fs_devices::fsid and to
* fs_devices::metadata_fsid
*
* Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
* The returned struct is not linked onto any lists and can be destroyed with
* kfree() right away.
*/
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
const u8 *metadata_fsid)
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{
struct btrfs_fs_devices *fs_devs;
ASSERT(fsid || !metadata_fsid);
fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
if (!fs_devs)
return ERR_PTR(-ENOMEM);
@ -385,8 +384,7 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
if (fsid) {
memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
memcpy(fs_devs->metadata_uuid,
metadata_fsid ?: fsid, BTRFS_FSID_SIZE);
memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
}
return fs_devs;
@ -457,58 +455,6 @@ static noinline struct btrfs_fs_devices *find_fsid(
return NULL;
}
/*
* First check if the metadata_uuid is different from the fsid in the given
* fs_devices. Then check if the given fsid is the same as the metadata_uuid
* in the fs_devices. If it is, return true; otherwise, return false.
*/
static inline bool check_fsid_changed(const struct btrfs_fs_devices *fs_devices,
const u8 *fsid)
{
return memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
BTRFS_FSID_SIZE) != 0 &&
memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE) == 0;
}
static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
struct btrfs_super_block *disk_super)
{
struct btrfs_fs_devices *fs_devices;
/*
* Handle scanned device having completed its fsid change but
* belonging to a fs_devices that was created by first scanning
* a device which didn't have its fsid/metadata_uuid changed
* at all and the CHANGING_FSID_V2 flag set.
*/
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
if (!fs_devices->fsid_change)
continue;
if (match_fsid_fs_devices(fs_devices, disk_super->metadata_uuid,
fs_devices->fsid))
return fs_devices;
}
/*
* Handle scanned device having completed its fsid change but
* belonging to a fs_devices that was created by a device that
* has an outdated pair of fsid/metadata_uuid and
* CHANGING_FSID_V2 flag set.
*/
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
if (!fs_devices->fsid_change)
continue;
if (check_fsid_changed(fs_devices, disk_super->metadata_uuid))
return fs_devices;
}
return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}
static int
btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder,
int flush, struct bdev_handle **bdev_handle,
@ -564,13 +510,13 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
{
struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
struct btrfs_device *device, *tmp_device;
int ret = 0;
int ret;
bool freed = false;
lockdep_assert_held(&uuid_mutex);
if (devt)
ret = -ENOENT;
/* Return good status if there is no instance of devt. */
ret = 0;
list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
mutex_lock(&fs_devices->device_list_mutex);
@ -581,8 +527,7 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
if (devt && devt != device->devt)
continue;
if (fs_devices->opened) {
/* for an already deleted device return 0 */
if (devt && ret != 0)
if (devt)
ret = -EBUSY;
break;
}
@ -592,7 +537,7 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
list_del(&device->dev_list);
btrfs_free_device(device);
ret = 0;
freed = true;
}
mutex_unlock(&fs_devices->device_list_mutex);
@ -603,9 +548,81 @@ static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device
}
}
/* If there is at least one freed device return 0. */
if (freed)
return 0;
return ret;
}
static struct btrfs_fs_devices *find_fsid_by_device(
struct btrfs_super_block *disk_super,
dev_t devt, bool *same_fsid_diff_dev)
{
struct btrfs_fs_devices *fsid_fs_devices;
struct btrfs_fs_devices *devt_fs_devices;
const bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
bool found_by_devt = false;
/* Find the fs_device by the usual method, if found use it. */
fsid_fs_devices = find_fsid(disk_super->fsid,
has_metadata_uuid ? disk_super->metadata_uuid : NULL);
/* The temp_fsid feature is supported only with a single device filesystem. */
if (btrfs_super_num_devices(disk_super) != 1)
return fsid_fs_devices;
/*
* A seed device is an integral component of the sprout device, which
* functions as a multi-device filesystem. So the temp-fsid feature is
* not supported.
*/
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)
return fsid_fs_devices;
/* Try to find a fs_devices by matching devt. */
list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) {
struct btrfs_device *device;
list_for_each_entry(device, &devt_fs_devices->devices, dev_list) {
if (device->devt == devt) {
found_by_devt = true;
break;
}
}
if (found_by_devt)
break;
}
if (found_by_devt) {
/* Existing device. */
if (fsid_fs_devices == NULL) {
if (devt_fs_devices->opened == 0) {
/* Stale device. */
return NULL;
} else {
/* temp_fsid is mounting a subvol. */
return devt_fs_devices;
}
} else {
/* Regular or temp_fsid device mounting a subvol. */
return devt_fs_devices;
}
} else {
/* New device. */
if (fsid_fs_devices == NULL) {
return NULL;
} else {
/* sb::fsid is already used, create a new temp_fsid. */
*same_fsid_diff_dev = true;
return NULL;
}
}
/* Not reached. */
}
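/*
 * Summary of the outcomes above, derived from find_fsid_by_device():
 *
 *	devt found | fsid found | result
 *	-----------+------------+----------------------------------------------
 *	    yes    |    yes     | devt_fs_devices (regular or temp_fsid mount)
 *	    yes    |    no      | NULL if stale (not opened), else devt_fs_devices
 *	    no     |    yes     | NULL, *same_fsid_diff_dev set (new temp_fsid)
 *	    no     |    no      | NULL (brand new device)
 */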
/*
* This is only used on mount, and we are protected from competing things
* messing with our fs_devices by the uuid_mutex, thus we do not need the
@ -691,84 +708,6 @@ u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb)
return has_metadata_uuid ? sb->metadata_uuid : sb->fsid;
}
/*
* Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
* being created with a disk that has already completed its fsid change. Such
* disk can belong to an fs which has its FSID changed or to one which doesn't.
* Handle both cases here.
*/
static struct btrfs_fs_devices *find_fsid_inprogress(
struct btrfs_super_block *disk_super)
{
struct btrfs_fs_devices *fs_devices;
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
if (fs_devices->fsid_change)
continue;
if (check_fsid_changed(fs_devices, disk_super->fsid))
return fs_devices;
}
return find_fsid(disk_super->fsid, NULL);
}
static struct btrfs_fs_devices *find_fsid_changed(
struct btrfs_super_block *disk_super)
{
struct btrfs_fs_devices *fs_devices;
/*
* Handles the case where scanned device is part of an fs that had
* multiple successful changes of FSID but currently device didn't
* observe it. Meaning our fsid will be different than theirs. We need
* to handle two subcases :
* 1 - The fs still continues to have different METADATA/FSID uuids.
* 2 - The fs is switched back to its original FSID (METADATA/FSID
* are equal).
*/
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
/* Changed UUIDs */
if (check_fsid_changed(fs_devices, disk_super->metadata_uuid) &&
memcmp(fs_devices->fsid, disk_super->fsid,
BTRFS_FSID_SIZE) != 0)
return fs_devices;
/* Unchanged UUIDs */
if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
BTRFS_FSID_SIZE) == 0 &&
memcmp(fs_devices->fsid, disk_super->metadata_uuid,
BTRFS_FSID_SIZE) == 0)
return fs_devices;
}
return NULL;
}
static struct btrfs_fs_devices *find_fsid_reverted_metadata(
struct btrfs_super_block *disk_super)
{
struct btrfs_fs_devices *fs_devices;
/*
* Handle the case where the scanned device is part of an fs whose last
* metadata UUID change reverted it to the original FSID. At the same
* time fs_devices was first created by another constituent device
* which didn't fully observe the operation. This results in an
* btrfs_fs_devices created with metadata/fsid different AND
* btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
* fs_devices equal to the FSID of the disk.
*/
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
if (!fs_devices->fsid_change)
continue;
if (check_fsid_changed(fs_devices, disk_super->fsid))
return fs_devices;
}
return NULL;
}
/*
* Add new device to list of registered devices
*
@ -787,10 +726,16 @@ static noinline struct btrfs_device *device_list_add(const char *path,
u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_t path_devt;
int error;
bool same_fsid_diff_dev = false;
bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
btrfs_err(NULL,
"device %s has incomplete metadata_uuid change, please use btrfstune to complete",
path);
return ERR_PTR(-EAGAIN);
}
error = lookup_bdev(path, &path_devt);
if (error) {
@ -799,27 +744,23 @@ static noinline struct btrfs_device *device_list_add(const char *path,
return ERR_PTR(error);
}
if (fsid_change_in_progress) {
if (!has_metadata_uuid)
fs_devices = find_fsid_inprogress(disk_super);
else
fs_devices = find_fsid_changed(disk_super);
} else if (has_metadata_uuid) {
fs_devices = find_fsid_with_metadata_uuid(disk_super);
} else {
fs_devices = find_fsid_reverted_metadata(disk_super);
if (!fs_devices)
fs_devices = find_fsid(disk_super->fsid, NULL);
}
fs_devices = find_fsid_by_device(disk_super, path_devt, &same_fsid_diff_dev);
if (!fs_devices) {
fs_devices = alloc_fs_devices(disk_super->fsid,
has_metadata_uuid ? disk_super->metadata_uuid : NULL);
fs_devices = alloc_fs_devices(disk_super->fsid);
if (has_metadata_uuid)
memcpy(fs_devices->metadata_uuid,
disk_super->metadata_uuid, BTRFS_FSID_SIZE);
if (IS_ERR(fs_devices))
return ERR_CAST(fs_devices);
fs_devices->fsid_change = fsid_change_in_progress;
if (same_fsid_diff_dev) {
generate_random_uuid(fs_devices->fsid);
fs_devices->temp_fsid = true;
pr_info("BTRFS: device %s using temp-fsid %pU\n",
path, fs_devices->fsid);
}
mutex_lock(&fs_devices->device_list_mutex);
list_add(&fs_devices->fs_list, &fs_uuids);
@ -834,18 +775,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
mutex_lock(&fs_devices->device_list_mutex);
device = btrfs_find_device(fs_devices, &args);
/*
* If this disk has been pulled into an fs devices created by
* a device which had the CHANGING_FSID_V2 flag then replace the
* metadata_uuid/fsid values of the fs_devices.
*/
if (fs_devices->fsid_change &&
found_transid > fs_devices->latest_generation) {
if (found_transid > fs_devices->latest_generation) {
memcpy(fs_devices->fsid, disk_super->fsid,
BTRFS_FSID_SIZE);
memcpy(fs_devices->metadata_uuid,
btrfs_sb_fsid_ptr(disk_super), BTRFS_FSID_SIZE);
fs_devices->fsid_change = false;
}
}
@ -999,7 +933,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
lockdep_assert_held(&uuid_mutex);
fs_devices = alloc_fs_devices(orig->fsid, NULL);
fs_devices = alloc_fs_devices(orig->fsid);
if (IS_ERR(fs_devices))
return fs_devices;
@ -1359,9 +1293,14 @@ int btrfs_forget_devices(dev_t devt)
/*
* Look for a btrfs signature on a device. This may be called out of the mount path
* and we are not allowed to call set_blocksize during the scan. The superblock
* is read via pagecache
* is read via pagecache.
*
* With @mount_arg_dev it's a scan during mount time that will always register
* the device or return an error. Multi-device and seeding devices are registered
* in both cases.
*/
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags)
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
bool mount_arg_dev)
{
struct btrfs_super_block *disk_super;
bool new_device_added = false;
@ -1407,10 +1346,27 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags)
goto error_bdev_put;
}
if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
!(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)) {
dev_t devt;
ret = lookup_bdev(path, &devt);
if (ret)
btrfs_warn(NULL, "lookup bdev failed for path %s: %d",
path, ret);
else
btrfs_free_stale_devices(devt, NULL);
pr_debug("BTRFS: skip registering single non-seed device %s\n", path);
device = NULL;
goto free_disk_super;
}
device = device_list_add(path, disk_super, &new_device_added);
if (!IS_ERR(device) && new_device_added)
btrfs_free_stale_devices(device->devt, device);
free_disk_super:
btrfs_release_disk_super(disk_super);
error_bdev_put:
@ -1898,7 +1854,7 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
ptr = btrfs_device_fsid(dev_item);
write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
ptr, BTRFS_FSID_SIZE);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
ret = 0;
out:
@ -2454,7 +2410,7 @@ static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
* Private copy of the seed devices, anchored at
* fs_info->fs_devices->seed_list
*/
seed_devices = alloc_fs_devices(NULL, NULL);
seed_devices = alloc_fs_devices(NULL);
if (IS_ERR(seed_devices))
return seed_devices;
@ -2600,7 +2556,7 @@ next_slot:
if (device->fs_devices->seeding) {
btrfs_set_device_generation(leaf, dev_item,
device->generation);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
}
path->slots[0]++;
@ -2898,7 +2854,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
btrfs_device_get_disk_total_bytes(device));
btrfs_set_device_bytes_used(leaf, dev_item,
btrfs_device_get_bytes_used(device));
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
@ -2932,6 +2888,7 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
btrfs_set_super_total_bytes(super_copy,
round_down(old_total + diff, fs_info->sectorsize));
device->fs_devices->total_rw_bytes += diff;
atomic64_add(diff, &fs_info->free_chunk_space);
btrfs_device_set_total_bytes(device, new_size);
btrfs_device_set_disk_total_bytes(device, new_size);
@ -3030,7 +2987,8 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
}
/*
* btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
* Find the mapping containing the given logical extent.
*
* @logical: Logical block offset in bytes.
* @length: Length of extent in bytes.
*
@ -3486,7 +3444,7 @@ static int insert_balance_item(struct btrfs_fs_info *fs_info,
btrfs_set_balance_flags(leaf, item, bctl->flags);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
out:
btrfs_free_path(path);
err = btrfs_commit_transaction(trans);
@ -4841,6 +4799,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
u64 old_size = btrfs_device_get_total_bytes(device);
u64 diff;
u64 start;
u64 free_diff = 0;
new_size = round_down(new_size, fs_info->sectorsize);
start = new_size;
@ -4866,7 +4825,19 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
btrfs_device_set_total_bytes(device, new_size);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
device->fs_devices->total_rw_bytes -= diff;
atomic64_sub(diff, &fs_info->free_chunk_space);
/*
* The new free_chunk_space is new_size - used, so we have to
* subtract the delta of the old free_chunk_space which included
* old_size - used. If used > new_size then just subtract this
* entire device's free space.
*/
if (device->bytes_used < new_size)
free_diff = (old_size - device->bytes_used) -
(new_size - device->bytes_used);
else
free_diff = old_size - device->bytes_used;
atomic64_sub(free_diff, &fs_info->free_chunk_space);
}
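/*
 * Worked example with hypothetical sizes: shrinking a device from
 * old_size = 100 GiB with bytes_used = 40 GiB to new_size = 60 GiB gives
 * free_diff = (100 - 40) - (60 - 40) = 40 GiB, exactly the free space
 * removed by the shrink. With bytes_used = 70 GiB (> new_size),
 * free_diff = 100 - 70 = 30 GiB, i.e. all of the device's remaining
 * free space.
 */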
/*
@ -5001,9 +4972,10 @@ done:
if (ret) {
mutex_lock(&fs_info->chunk_mutex);
btrfs_device_set_total_bytes(device, old_size);
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
device->fs_devices->total_rw_bytes += diff;
atomic64_add(diff, &fs_info->free_chunk_space);
atomic64_add(free_diff, &fs_info->free_chunk_space);
}
mutex_unlock(&fs_info->chunk_mutex);
}
return ret;
@ -5883,6 +5855,7 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
}
static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
u64 logical,
u16 total_stripes)
{
struct btrfs_io_context *bioc;
@ -5902,6 +5875,7 @@ static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_
bioc->fs_info = fs_info;
bioc->replace_stripe_src = -1;
bioc->full_stripe_logical = (u64)-1;
bioc->logical = logical;
return bioc;
}
@ -6206,12 +6180,20 @@ static u64 btrfs_max_io_len(struct map_lookup *map, enum btrfs_map_op op,
return U64_MAX;
}
static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *map,
u32 stripe_index, u64 stripe_offset, u32 stripe_nr)
static int set_io_stripe(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length, struct btrfs_io_stripe *dst,
struct map_lookup *map, u32 stripe_index,
u64 stripe_offset, u64 stripe_nr)
{
dst->dev = map->stripes[stripe_index].dev;
if (op == BTRFS_MAP_READ && btrfs_need_stripe_tree_update(fs_info, map->type))
return btrfs_get_raid_extent_offset(fs_info, logical, length,
map->type, stripe_index, dst);
dst->physical = map->stripes[stripe_index].physical +
stripe_offset + btrfs_stripe_nr_to_offset(stripe_nr);
return 0;
}
/*
@ -6248,16 +6230,11 @@ static void set_io_stripe(struct btrfs_io_stripe *dst, const struct map_lookup *
* For RAID6 profile, mirror > 2 means mark another
* data/P stripe error and rebuild from the remaining
stripes.
*
* @need_raid_map: (Used only for integrity checker) whether the map wants
* a full stripe map (including all data and P/Q stripes)
* for RAID56. Should always be 1 except integrity checker.
*/
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_io_context **bioc_ret,
struct btrfs_io_stripe *smap, int *mirror_num_ret,
int need_raid_map)
struct btrfs_io_stripe *smap, int *mirror_num_ret)
{
struct extent_map *em;
struct map_lookup *map;
@ -6352,8 +6329,10 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
}
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
if (need_raid_map && (op != BTRFS_MAP_READ || mirror_num > 1)) {
if (op != BTRFS_MAP_READ || mirror_num > 1) {
/*
* Needs full stripe mapping.
*
* Push stripe_nr back to the start of the full stripe.
* For those cases needing a full stripe, @stripe_nr
* is the full stripe number.
@ -6376,19 +6355,14 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
stripe_index = 0;
stripe_offset = 0;
} else {
/*
* Mirror #0 or #1 means the original data block.
* Mirror #2 is RAID5 parity block.
* Mirror #3 is RAID6 Q block.
*/
ASSERT(mirror_num <= 1);
/* Just grab the data stripe directly. */
stripe_index = stripe_nr % data_stripes;
stripe_nr /= data_stripes;
if (mirror_num > 1)
stripe_index = data_stripes + mirror_num - 2;
/* We distribute the parity blocks across stripes */
stripe_index = (stripe_nr + stripe_index) % map->num_stripes;
if (op == BTRFS_MAP_READ && mirror_num <= 1)
if (op == BTRFS_MAP_READ && mirror_num < 1)
mirror_num = 1;
}
} else {
@ -6427,16 +6401,18 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* I/O context structure.
*/
if (smap && num_alloc_stripes == 1 &&
!(btrfs_need_stripe_tree_update(fs_info, map->type) &&
op != BTRFS_MAP_READ) &&
!((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && mirror_num > 1)) {
set_io_stripe(smap, map, stripe_index, stripe_offset, stripe_nr);
ret = set_io_stripe(fs_info, op, logical, length, smap, map,
stripe_index, stripe_offset, stripe_nr);
if (mirror_num_ret)
*mirror_num_ret = mirror_num;
*bioc_ret = NULL;
ret = 0;
goto out;
}
bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes);
bioc = alloc_btrfs_io_context(fs_info, logical, num_alloc_stripes);
if (!bioc) {
ret = -ENOMEM;
goto out;
@ -6450,7 +6426,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
*
* It's still mostly the same as other profiles, just with extra rotation.
*/
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
(op != BTRFS_MAP_READ || mirror_num > 1)) {
/*
* For RAID56 @stripe_nr is already the number of full stripes
@ -6462,22 +6438,35 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
*/
bioc->full_stripe_logical = em->start +
btrfs_stripe_nr_to_offset(stripe_nr * data_stripes);
for (i = 0; i < num_stripes; i++)
set_io_stripe(&bioc->stripes[i], map,
(i + stripe_nr) % num_stripes,
stripe_offset, stripe_nr);
for (int i = 0; i < num_stripes; i++) {
ret = set_io_stripe(fs_info, op, logical, length,
&bioc->stripes[i], map,
(i + stripe_nr) % num_stripes,
stripe_offset, stripe_nr);
if (ret < 0)
break;
}
} else {
/*
* For all other non-RAID56 profiles, just copy the target
* stripe into the bioc.
*/
for (i = 0; i < num_stripes; i++) {
set_io_stripe(&bioc->stripes[i], map, stripe_index,
stripe_offset, stripe_nr);
ret = set_io_stripe(fs_info, op, logical, length,
&bioc->stripes[i], map, stripe_index,
stripe_offset, stripe_nr);
if (ret < 0)
break;
stripe_index++;
}
}
if (ret) {
*bioc_ret = NULL;
btrfs_put_bioc(bioc);
goto out;
}
if (op != BTRFS_MAP_READ)
max_errors = btrfs_chunk_max_errors(map);
@ -6904,7 +6893,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
if (!btrfs_test_opt(fs_info, DEGRADED))
return ERR_PTR(-ENOENT);
fs_devices = alloc_fs_devices(fsid, NULL);
fs_devices = alloc_fs_devices(fsid);
if (IS_ERR(fs_devices))
return fs_devices;
@ -7537,7 +7526,7 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans,
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
btrfs_set_dev_stats_value(eb, ptr, i,
btrfs_dev_stat_read(device, i));
btrfs_mark_buffer_dirty(eb);
btrfs_mark_buffer_dirty(trans, eb);
out:
btrfs_free_path(path);
@ -8079,7 +8068,7 @@ int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
ASSERT(mirror_num > 0);
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical, &map_length,
&bioc, smap, &mirror_ret, true);
&bioc, smap, &mirror_ret);
if (ret < 0)
return ret;


@ -288,6 +288,19 @@ struct btrfs_fs_devices {
* - Following shall be true at all times:
* - metadata_uuid == btrfs_header::fsid
* - metadata_uuid == btrfs_dev_item::fsid
*
* - Relations between fsid and metadata_uuid in sb and fs_devices:
* - Normal:
* fs_devices->fsid == fs_devices->metadata_uuid == sb->fsid
* sb->metadata_uuid == 0
*
* - When the BTRFS_FEATURE_INCOMPAT_METADATA_UUID flag is set:
* fs_devices->fsid == sb->fsid
* fs_devices->metadata_uuid == sb->metadata_uuid
*
* - When in-memory fs_devices->temp_fsid is true
* fs_devices->fsid = random
* fs_devices->metadata_uuid == sb->fsid
*/
u8 metadata_uuid[BTRFS_FSID_SIZE];
@ -351,9 +364,10 @@ struct btrfs_fs_devices {
bool rotating;
/* Devices support TRIM/discard commands. */
bool discardable;
bool fsid_change;
/* The filesystem is a seed filesystem. */
bool seeding;
/* The mount needs to use a randomly generated fsid. */
bool temp_fsid;
struct btrfs_fs_info *fs_info;
/* sysfs kobjects */
@ -379,12 +393,12 @@ struct btrfs_fs_devices {
struct btrfs_io_stripe {
struct btrfs_device *dev;
union {
/* Block mapping */
u64 physical;
/* For the endio handler */
struct btrfs_io_context *bioc;
};
/* Block mapping. */
u64 physical;
u64 length;
bool is_scrub;
/* For the endio handler. */
struct btrfs_io_context *bioc;
};
struct btrfs_discard_stripe {
@ -417,6 +431,11 @@ struct btrfs_io_context {
atomic_t error;
u16 max_errors;
u64 logical;
u64 size;
/* Raid stripe tree ordered entry. */
struct list_head rst_ordered_entry;
/*
* The total number of stripes, including the extra duplicated
* stripe for replace.
@ -594,8 +613,7 @@ void btrfs_put_bioc(struct btrfs_io_context *bioc);
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_io_context **bioc_ret,
struct btrfs_io_stripe *smap, int *mirror_num_ret,
int need_raid_map);
struct btrfs_io_stripe *smap, int *mirror_num_ret);
int btrfs_map_repair_block(struct btrfs_fs_info *fs_info,
struct btrfs_io_stripe *smap, u64 logical,
u32 length, int mirror_num);
@ -609,7 +627,8 @@ struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
void btrfs_mapping_tree_free(struct extent_map_tree *tree);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
blk_mode_t flags, void *holder);
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags);
struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
bool mount_arg_dev);
int btrfs_forget_devices(dev_t devt);
void btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices);


@ -188,15 +188,15 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
if (old_data_len + name_len + sizeof(*di) == item_size) {
/* No other xattrs packed in the same leaf item. */
if (size > old_data_len)
btrfs_extend_item(path, size - old_data_len);
btrfs_extend_item(trans, path, size - old_data_len);
else if (size < old_data_len)
btrfs_truncate_item(path, data_size, 1);
btrfs_truncate_item(trans, path, data_size, 1);
} else {
/* There are other xattrs packed in the same item. */
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto out;
btrfs_extend_item(path, data_size);
btrfs_extend_item(trans, path, data_size);
}
ptr = btrfs_item_ptr(leaf, slot, char);
@ -205,7 +205,7 @@ int btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode,
btrfs_set_dir_data_len(leaf, di, size);
data_ptr = ((unsigned long)(di + 1)) + name_len;
write_extent_buffer(leaf, value, data_ptr, size);
btrfs_mark_buffer_dirty(leaf);
btrfs_mark_buffer_dirty(trans, leaf);
} else {
/*
* Insert, and we had space for the xattr, so path->slots[0] is
@ -265,7 +265,7 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret)
btrfs_abort_transaction(trans, ret);
out:
@ -408,7 +408,7 @@ static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
if (!ret) {
inode_inc_iversion(inode);
inode_set_ctime_current(inode);
ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
ret = btrfs_update_inode(trans, BTRFS_I(inode));
if (ret)
btrfs_abort_transaction(trans, ret);
}


@ -1282,21 +1282,284 @@ out:
return ret;
}
struct zone_info {
u64 physical;
u64 capacity;
u64 alloc_offset;
};
static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
struct zone_info *info, unsigned long *active,
struct map_lookup *map)
{
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
struct btrfs_device *device = map->stripes[zone_idx].dev;
int dev_replace_is_ongoing = 0;
unsigned int nofs_flag;
struct blk_zone zone;
int ret;
info->physical = map->stripes[zone_idx].physical;
if (!device->bdev) {
info->alloc_offset = WP_MISSING_DEV;
return 0;
}
/* Consider a zone as active if we can allow any number of active zones. */
if (!device->zone_info->max_active_zones)
__set_bit(zone_idx, active);
if (!btrfs_dev_is_sequential(device, info->physical)) {
info->alloc_offset = WP_CONVENTIONAL;
return 0;
}
/* This zone will be used for allocation, so mark this zone non-empty. */
btrfs_dev_clear_zone_empty(device, info->physical);
down_read(&dev_replace->rwsem);
dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
up_read(&dev_replace->rwsem);
/*
* The group is mapped to a sequential zone. Get the zone write pointer
* to determine the allocation offset within the zone.
*/
WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
nofs_flag = memalloc_nofs_save();
ret = btrfs_get_dev_zone(device, info->physical, &zone);
memalloc_nofs_restore(nofs_flag);
if (ret) {
if (ret != -EIO && ret != -EOPNOTSUPP)
return ret;
info->alloc_offset = WP_MISSING_DEV;
return 0;
}
if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
btrfs_err_in_rcu(fs_info,
"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
device->devid);
return -EIO;
}
info->capacity = (zone.capacity << SECTOR_SHIFT);
switch (zone.cond) {
case BLK_ZONE_COND_OFFLINE:
case BLK_ZONE_COND_READONLY:
btrfs_err(fs_info,
"zoned: offline/readonly zone %llu on device %s (devid %llu)",
(info->physical >> device->zone_info->zone_size_shift),
rcu_str_deref(device->name), device->devid);
info->alloc_offset = WP_MISSING_DEV;
break;
case BLK_ZONE_COND_EMPTY:
info->alloc_offset = 0;
break;
case BLK_ZONE_COND_FULL:
info->alloc_offset = info->capacity;
break;
default:
/* Partially used zone. */
info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
__set_bit(zone_idx, active);
break;
}
return 0;
}
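/*
 * Illustrative numbers for the partially used case above: a write pointer
 * 32768 sectors past the zone start yields
 * alloc_offset = 32768 << SECTOR_SHIFT = 32768 * 512 bytes = 16 MiB.
 */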
static int btrfs_load_block_group_single(struct btrfs_block_group *bg,
struct zone_info *info,
unsigned long *active)
{
if (info->alloc_offset == WP_MISSING_DEV) {
btrfs_err(bg->fs_info,
"zoned: cannot recover write pointer for zone %llu",
info->physical);
return -EIO;
}
bg->alloc_offset = info->alloc_offset;
bg->zone_capacity = info->capacity;
if (test_bit(0, active))
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
return 0;
}
static int btrfs_load_block_group_dup(struct btrfs_block_group *bg,
struct map_lookup *map,
struct zone_info *zone_info,
unsigned long *active)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
return -EINVAL;
}
if (zone_info[0].alloc_offset == WP_MISSING_DEV) {
btrfs_err(bg->fs_info,
"zoned: cannot recover write pointer for zone %llu",
zone_info[0].physical);
return -EIO;
}
if (zone_info[1].alloc_offset == WP_MISSING_DEV) {
btrfs_err(bg->fs_info,
"zoned: cannot recover write pointer for zone %llu",
zone_info[1].physical);
return -EIO;
}
if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) {
btrfs_err(bg->fs_info,
"zoned: write pointer offset mismatch of zones in DUP profile");
return -EIO;
}
if (test_bit(0, active) != test_bit(1, active)) {
if (!btrfs_zone_activate(bg))
return -EIO;
} else if (test_bit(0, active)) {
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
}
bg->alloc_offset = zone_info[0].alloc_offset;
bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity);
return 0;
}
static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg,
struct map_lookup *map,
struct zone_info *zone_info,
unsigned long *active)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
int i;
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
btrfs_bg_type_to_raid_name(map->type));
return -EINVAL;
}
for (i = 0; i < map->num_stripes; i++) {
if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
zone_info[i].alloc_offset == WP_CONVENTIONAL)
continue;
if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) &&
!btrfs_test_opt(fs_info, DEGRADED)) {
btrfs_err(fs_info,
"zoned: write pointer offset mismatch of zones in %s profile",
btrfs_bg_type_to_raid_name(map->type));
return -EIO;
}
if (test_bit(0, active) != test_bit(i, active)) {
if (!btrfs_test_opt(fs_info, DEGRADED) &&
!btrfs_zone_activate(bg)) {
return -EIO;
}
} else {
if (test_bit(0, active))
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
}
/* In case a device is missing we have a cap of 0, so don't use it. */
bg->zone_capacity = min_not_zero(zone_info[0].capacity,
zone_info[1].capacity);
}
if (zone_info[0].alloc_offset != WP_MISSING_DEV)
bg->alloc_offset = zone_info[0].alloc_offset;
else
bg->alloc_offset = zone_info[i - 1].alloc_offset;
return 0;
}
static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg,
struct map_lookup *map,
struct zone_info *zone_info,
unsigned long *active)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
btrfs_bg_type_to_raid_name(map->type));
return -EINVAL;
}
for (int i = 0; i < map->num_stripes; i++) {
if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
zone_info[i].alloc_offset == WP_CONVENTIONAL)
continue;
if (test_bit(0, active) != test_bit(i, active)) {
if (!btrfs_zone_activate(bg))
return -EIO;
} else {
if (test_bit(0, active))
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
}
bg->zone_capacity += zone_info[i].capacity;
bg->alloc_offset += zone_info[i].alloc_offset;
}
return 0;
}
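/*
 * Illustrative accumulation for RAID0 (hypothetical numbers): two zones,
 * each with a capacity of 256 MiB and a write pointer at 64 MiB, yield
 * bg->zone_capacity = 512 MiB and bg->alloc_offset = 128 MiB.
 */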
static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg,
struct map_lookup *map,
struct zone_info *zone_info,
unsigned long *active)
{
struct btrfs_fs_info *fs_info = bg->fs_info;
if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) {
btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
btrfs_bg_type_to_raid_name(map->type));
return -EINVAL;
}
for (int i = 0; i < map->num_stripes; i++) {
if (zone_info[i].alloc_offset == WP_MISSING_DEV ||
zone_info[i].alloc_offset == WP_CONVENTIONAL)
continue;
if (test_bit(0, active) != test_bit(i, active)) {
if (!btrfs_zone_activate(bg))
return -EIO;
} else {
if (test_bit(0, active))
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
}
if ((i % map->sub_stripes) == 0) {
bg->zone_capacity += zone_info[i].capacity;
bg->alloc_offset += zone_info[i].alloc_offset;
}
}
return 0;
}
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct extent_map_tree *em_tree = &fs_info->mapping_tree;
struct extent_map *em;
struct map_lookup *map;
struct btrfs_device *device;
u64 logical = cache->start;
u64 length = cache->length;
struct zone_info *zone_info = NULL;
int ret;
int i;
unsigned int nofs_flag;
u64 *alloc_offsets = NULL;
u64 *caps = NULL;
u64 *physical = NULL;
unsigned long *active = NULL;
u64 last_alloc = 0;
u32 num_sequential = 0, num_conventional = 0;
@ -1328,20 +1591,8 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
goto out;
}
alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
if (!alloc_offsets) {
ret = -ENOMEM;
goto out;
}
caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
if (!caps) {
ret = -ENOMEM;
goto out;
}
physical = kcalloc(map->num_stripes, sizeof(*physical), GFP_NOFS);
if (!physical) {
zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
if (!zone_info) {
ret = -ENOMEM;
goto out;
}
@ -1353,98 +1604,14 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
for (i = 0; i < map->num_stripes; i++) {
bool is_sequential;
struct blk_zone zone;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int dev_replace_is_ongoing = 0;
ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
if (ret)
goto out;
device = map->stripes[i].dev;
physical[i] = map->stripes[i].physical;
if (device->bdev == NULL) {
alloc_offsets[i] = WP_MISSING_DEV;
continue;
}
is_sequential = btrfs_dev_is_sequential(device, physical[i]);
if (is_sequential)
num_sequential++;
else
if (zone_info[i].alloc_offset == WP_CONVENTIONAL)
num_conventional++;
/*
* Consider a zone as active if we can allow any number of
* active zones.
*/
if (!device->zone_info->max_active_zones)
__set_bit(i, active);
if (!is_sequential) {
alloc_offsets[i] = WP_CONVENTIONAL;
continue;
}
/*
* This zone will be used for allocation, so mark this zone
* non-empty.
*/
btrfs_dev_clear_zone_empty(device, physical[i]);
down_read(&dev_replace->rwsem);
dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
btrfs_dev_clear_zone_empty(dev_replace->tgtdev, physical[i]);
up_read(&dev_replace->rwsem);
/*
* The group is mapped to a sequential zone. Get the zone write
* pointer to determine the allocation offset within the zone.
*/
WARN_ON(!IS_ALIGNED(physical[i], fs_info->zone_size));
nofs_flag = memalloc_nofs_save();
ret = btrfs_get_dev_zone(device, physical[i], &zone);
memalloc_nofs_restore(nofs_flag);
if (ret == -EIO || ret == -EOPNOTSUPP) {
ret = 0;
alloc_offsets[i] = WP_MISSING_DEV;
continue;
} else if (ret) {
goto out;
}
if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) {
btrfs_err_in_rcu(fs_info,
"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
zone.start << SECTOR_SHIFT,
rcu_str_deref(device->name), device->devid);
ret = -EIO;
goto out;
}
caps[i] = (zone.capacity << SECTOR_SHIFT);
switch (zone.cond) {
case BLK_ZONE_COND_OFFLINE:
case BLK_ZONE_COND_READONLY:
btrfs_err(fs_info,
"zoned: offline/readonly zone %llu on device %s (devid %llu)",
physical[i] >> device->zone_info->zone_size_shift,
rcu_str_deref(device->name), device->devid);
alloc_offsets[i] = WP_MISSING_DEV;
break;
case BLK_ZONE_COND_EMPTY:
alloc_offsets[i] = 0;
break;
case BLK_ZONE_COND_FULL:
alloc_offsets[i] = caps[i];
break;
default:
/* Partially used zone */
alloc_offsets[i] =
((zone.wp - zone.start) << SECTOR_SHIFT);
__set_bit(i, active);
break;
}
else
num_sequential++;
}
if (num_sequential > 0)
@ -1468,63 +1635,24 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
case 0: /* single */
if (alloc_offsets[0] == WP_MISSING_DEV) {
btrfs_err(fs_info,
"zoned: cannot recover write pointer for zone %llu",
physical[0]);
ret = -EIO;
goto out;
}
cache->alloc_offset = alloc_offsets[0];
cache->zone_capacity = caps[0];
if (test_bit(0, active))
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
break;
case BTRFS_BLOCK_GROUP_DUP:
if (map->type & BTRFS_BLOCK_GROUP_DATA) {
btrfs_err(fs_info, "zoned: profile DUP not yet supported on data bg");
ret = -EINVAL;
goto out;
}
if (alloc_offsets[0] == WP_MISSING_DEV) {
btrfs_err(fs_info,
"zoned: cannot recover write pointer for zone %llu",
physical[0]);
ret = -EIO;
goto out;
}
if (alloc_offsets[1] == WP_MISSING_DEV) {
btrfs_err(fs_info,
"zoned: cannot recover write pointer for zone %llu",
physical[1]);
ret = -EIO;
goto out;
}
if (alloc_offsets[0] != alloc_offsets[1]) {
btrfs_err(fs_info,
"zoned: write pointer offset mismatch of zones in DUP profile");
ret = -EIO;
goto out;
}
if (test_bit(0, active) != test_bit(1, active)) {
if (!btrfs_zone_activate(cache)) {
ret = -EIO;
goto out;
}
} else {
if (test_bit(0, active))
set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
&cache->runtime_flags);
}
cache->alloc_offset = alloc_offsets[0];
cache->zone_capacity = min(caps[0], caps[1]);
ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
break;
case BTRFS_BLOCK_GROUP_RAID1:
case BTRFS_BLOCK_GROUP_RAID1C3:
case BTRFS_BLOCK_GROUP_RAID1C4:
ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
break;
case BTRFS_BLOCK_GROUP_RAID0:
ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
break;
case BTRFS_BLOCK_GROUP_RAID10:
ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
break;
case BTRFS_BLOCK_GROUP_RAID5:
case BTRFS_BLOCK_GROUP_RAID6:
/* non-single profiles are not supported yet */
default:
btrfs_err(fs_info, "zoned: profile %s not yet supported",
btrfs_bg_type_to_raid_name(map->type));
@ -1570,9 +1698,7 @@ out:
cache->physical_map = NULL;
}
bitmap_free(active);
kfree(physical);
kfree(caps);
kfree(alloc_offsets);
kfree(zone_info);
free_extent_map(em);
return ret;
@ -1609,7 +1735,7 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags);
set_extent_buffer_dirty(eb);
set_extent_bit(&trans->dirty_pages, eb->start, eb->start + eb->len - 1,
EXTENT_DIRTY | EXTENT_NOWAIT, NULL);
EXTENT_DIRTY, NULL);
}
bool btrfs_use_zone_append(struct btrfs_bio *bbio)
@ -1887,7 +2013,7 @@ static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
int i, ret;
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
&mapped_length, &bioc, NULL, NULL, 1);
&mapped_length, &bioc, NULL, NULL);
if (ret || !bioc || mapped_length < PAGE_SIZE) {
ret = -EIO;
goto out_put_bioc;


@ -145,7 +145,7 @@ static void zstd_reclaim_timer_fn(struct timer_list *timer)
}
/*
* zstd_calc_ws_mem_sizes - calculate monotonic memory bounds
* Calculate monotonic memory bounds.
*
* It is possible based on the level configurations that a higher level
* workspace uses less memory than a lower level workspace. In order to reuse
@ -218,7 +218,8 @@ void zstd_cleanup_workspace_manager(void)
}
/*
* zstd_find_workspace - find workspace
* Find workspace for given level.
*
* @level: compression level
*
* This iterates over the set bits in the active_map beginning at the requested
@ -256,7 +257,8 @@ static struct list_head *zstd_find_workspace(unsigned int level)
}
/*
* zstd_get_workspace - zstd's get_workspace
* Zstd get_workspace for level.
*
* @level: compression level
*
* If @level is 0, then any compression level can be used. Therefore, we begin
@ -296,7 +298,8 @@ again:
}
/*
* zstd_put_workspace - zstd put_workspace
* Zstd put_workspace.
*
* @ws: list_head for the workspace
*
* When putting back a workspace, we only need to update the LRU if we are of


@ -1561,7 +1561,6 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( const void *, wq )
__field( const void *, func )
__field( const void *, ordered_func )
__field( const void *, ordered_free )
__field( const void *, normal_work )
),
@ -1570,14 +1569,12 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->wq = work->wq;
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
__entry->ordered_free = work->ordered_free;
__entry->normal_work = &work->normal_work;
),
TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p "
"ordered_free=%p",
TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p",
__entry->work, __entry->normal_work, __entry->wq,
__entry->func, __entry->ordered_func, __entry->ordered_free)
__entry->func, __entry->ordered_func)
);
/*
@ -2497,6 +2494,82 @@ DEFINE_EVENT(btrfs_raid56_bio, raid56_write,
TP_ARGS(rbio, bio, trace_info)
);
TRACE_EVENT(btrfs_insert_one_raid_extent,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
int num_stripes),
TP_ARGS(fs_info, logical, length, num_stripes),
TP_STRUCT__entry_btrfs(
__field( u64, logical )
__field( u64, length )
__field( int, num_stripes )
),
TP_fast_assign_btrfs(fs_info,
__entry->logical = logical;
__entry->length = length;
__entry->num_stripes = num_stripes;
),
TP_printk_btrfs("logical=%llu length=%llu num_stripes=%d",
__entry->logical, __entry->length,
__entry->num_stripes)
);
TRACE_EVENT(btrfs_raid_extent_delete,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 start, u64 end,
u64 found_start, u64 found_end),
TP_ARGS(fs_info, start, end, found_start, found_end),
TP_STRUCT__entry_btrfs(
__field( u64, start )
__field( u64, end )
__field( u64, found_start )
__field( u64, found_end )
),
TP_fast_assign_btrfs(fs_info,
__entry->start = start;
__entry->end = end;
__entry->found_start = found_start;
__entry->found_end = found_end;
),
TP_printk_btrfs("start=%llu end=%llu found_start=%llu found_end=%llu",
__entry->start, __entry->end, __entry->found_start,
__entry->found_end)
);
TRACE_EVENT(btrfs_get_raid_extent_offset,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 logical, u64 length,
u64 physical, u64 devid),
TP_ARGS(fs_info, logical, length, physical, devid),
TP_STRUCT__entry_btrfs(
__field( u64, logical )
__field( u64, length )
__field( u64, physical )
__field( u64, devid )
),
TP_fast_assign_btrfs(fs_info,
__entry->logical = logical;
__entry->length = length;
__entry->physical = physical;
__entry->devid = devid;
),
TP_printk_btrfs("logical=%llu length=%llu physical=%llu devid=%llu",
__entry->logical, __entry->length, __entry->physical,
__entry->devid)
);
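/*
 * Usage note (assumes a standard tracefs mount): each of the new events can
 * be toggled at runtime via its enable file, e.g.
 * /sys/kernel/tracing/events/btrfs/btrfs_insert_one_raid_extent/enable.
 */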
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */


@ -333,6 +333,8 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
#define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12)
#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13)
#define BTRFS_FEATURE_INCOMPAT_RAID_STRIPE_TREE (1ULL << 14)
#define BTRFS_FEATURE_INCOMPAT_SIMPLE_QUOTA (1ULL << 16)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@ -753,6 +755,7 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_QUOTA_CTL_ENABLE 1
#define BTRFS_QUOTA_CTL_DISABLE 2
#define BTRFS_QUOTA_CTL_RESCAN__NOTUSED 3
#define BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA 4
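/*
 * Hypothetical userspace sketch (not part of this header): simple quotas
 * are enabled through the existing quota ctl ioctl with the new command:
 *
 *	struct btrfs_ioctl_quota_ctl_args args = {
 *		.cmd = BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA,
 *	};
 *	ret = ioctl(fs_fd, BTRFS_IOC_QUOTA_CTL, &args);
 */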
struct btrfs_ioctl_quota_ctl_args {
__u64 cmd;
__u64 status;


@ -73,6 +73,9 @@
/* Holds the block group items for extent tree v2. */
#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL
/* Tracks RAID stripes in block groups. */
#define BTRFS_RAID_STRIPE_TREE_OBJECTID 12ULL
/* device stats in the device tree */
#define BTRFS_DEV_STATS_OBJECTID 0ULL
@ -230,6 +233,14 @@
#define BTRFS_SHARED_DATA_REF_KEY 184
/*
* Special inline ref key which stores the id of the subvolume which originally
* created the extent. This subvolume owns the extent permanently from the
* perspective of simple quotas. Needed to know which subvolume to free quota
* usage from when the extent is deleted.
*/
#define BTRFS_EXTENT_OWNER_REF_KEY 188
/*
* block groups give us hints into the extent allocation trees. Which
* blocks are free etc etc
@ -261,6 +272,8 @@
#define BTRFS_DEV_ITEM_KEY 216
#define BTRFS_CHUNK_ITEM_KEY 228
#define BTRFS_RAID_STRIPE_KEY 230
/*
* Records the overall state of the qgroups.
* There's only one instance of this key present,
@ -719,6 +732,30 @@ struct btrfs_free_space_header {
__le64 num_bitmaps;
} __attribute__ ((__packed__));
struct btrfs_raid_stride {
/* The id of device this raid extent lives on. */
__le64 devid;
/* The physical location on disk. */
__le64 physical;
} __attribute__ ((__packed__));
/* The stripe_extent::encoding, 1:1 mapping of enum btrfs_raid_types. */
#define BTRFS_STRIPE_RAID0 1
#define BTRFS_STRIPE_RAID1 2
#define BTRFS_STRIPE_DUP 3
#define BTRFS_STRIPE_RAID10 4
#define BTRFS_STRIPE_RAID5 5
#define BTRFS_STRIPE_RAID6 6
#define BTRFS_STRIPE_RAID1C3 7
#define BTRFS_STRIPE_RAID1C4 8
struct btrfs_stripe_extent {
__u8 encoding;
__u8 reserved[7];
/* An array of raid strides this stripe is composed of. */
struct btrfs_raid_stride strides[];
} __attribute__ ((__packed__));
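/*
 * Illustrative only: since strides[] is a flexible array member and the
 * struct is packed to 8 bytes, the number of strides in an item follows
 * from the item size:
 *
 *	num_strides = (item_size - sizeof(struct btrfs_stripe_extent)) /
 *		      sizeof(struct btrfs_raid_stride);
 */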
#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0)
#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1)
@ -787,6 +824,10 @@ struct btrfs_shared_data_ref {
__le32 count;
} __attribute__ ((__packed__));
struct btrfs_extent_owner_ref {
__le64 root_id;
} __attribute__ ((__packed__));
struct btrfs_extent_inline_ref {
__u8 type;
__le64 offset;
@ -1204,9 +1245,17 @@ static inline __u16 btrfs_qgroup_level(__u64 qgroupid)
*/
#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2)
/*
* Whether or not this filesystem is using simple quotas. Not exactly the
* incompat bit, because we support using simple quotas, disabling it, then
* going back to full qgroup quotas.
*/
#define BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE (1ULL << 3)
#define BTRFS_QGROUP_STATUS_FLAGS_MASK (BTRFS_QGROUP_STATUS_FLAG_ON | \
BTRFS_QGROUP_STATUS_FLAG_RESCAN | \
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT)
BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT | \
BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE)
#define BTRFS_QGROUP_STATUS_VERSION 1
@ -1228,6 +1277,15 @@ struct btrfs_qgroup_status_item {
* of the scan. It contains a logical address
*/
__le64 rescan;
/*
* The generation when quotas were last enabled. Used by simple quotas to
* avoid decrementing when freeing an extent that was written before
* enable.
*
* Set only if flags contain BTRFS_QGROUP_STATUS_FLAG_SIMPLE_MODE.
*/
__le64 enable_gen;
} __attribute__ ((__packed__));
struct btrfs_qgroup_info_item {