for-6.11-rc3-tag
Merge tag 'for-6.11-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull more btrfs fixes from David Sterba:

 "A few more fixes. We got reports that the shrinker added in 6.10 still
  causes latency spikes and the fixes don't handle all corner cases. Due to
  summer holidays we're taking a shortcut to disable it for release builds
  and will fix it in the near future.

   - only enable extent map shrinker for DEBUG builds, temporary quick fix
     to avoid latency spikes for regular builds

   - update target inode's ctime on unlink, mandated by POSIX

   - properly take lock to read/update block group's zoned variables

   - add counted_by() annotations"

* tag 'for-6.11-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: only enable extent map shrinker for DEBUG builds
  btrfs: zoned: properly take lock to read/update block group's zoned variables
  btrfs: tree-checker: add dev extent item checks
  btrfs: update target inode's ctime on unlink
  btrfs: send: annotate struct name_cache_entry with __counted_by()
commit 57b14823ea
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
@@ -2697,15 +2697,16 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 	u64 offset = bytenr - block_group->start;
 	u64 to_free, to_unusable;
 	int bg_reclaim_threshold = 0;
-	bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	bool initial;
 	u64 reclaimable_unusable;
 
-	WARN_ON(!initial && offset + size > block_group->zone_capacity);
+	spin_lock(&block_group->lock);
 
+	initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 	if (!initial)
 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
 
-	spin_lock(&ctl->tree_lock);
 	if (!used)
 		to_free = size;
 	else if (initial)
@@ -2718,7 +2719,9 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		to_free = offset + size - block_group->alloc_offset;
 	to_unusable = size - to_free;
 
+	spin_lock(&ctl->tree_lock);
 	ctl->free_space += to_free;
+	spin_unlock(&ctl->tree_lock);
 	/*
 	 * If the block group is read-only, we should account freed space into
 	 * bytes_readonly.
@@ -2727,11 +2730,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		block_group->zone_unusable += to_unusable;
 		WARN_ON(block_group->zone_unusable > block_group->length);
 	}
-	spin_unlock(&ctl->tree_lock);
 	if (!used) {
-		spin_lock(&block_group->lock);
 		block_group->alloc_offset -= size;
-		spin_unlock(&block_group->lock);
 	}
 
 	reclaimable_unusable = block_group->zone_unusable -
@@ -2745,6 +2745,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		btrfs_mark_bg_to_reclaim(block_group);
 	}
 
+	spin_unlock(&block_group->lock);
+
 	return 0;
 }
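The effect of these hunks is that every read and update of the block group's zoned fields (alloc_offset, zone_unusable, zone_capacity) now happens under block_group->lock, with ctl->tree_lock nested inside it only around the free-space counter; previously "initial" was computed from alloc_offset with no lock held. Below is a stand-alone user-space sketch of that locking shape, with invented names and pthread mutexes standing in for the kernel spinlocks, not btrfs code:

/* User-space sketch only: made-up types, pthread mutexes instead of spinlocks. */
#include <pthread.h>
#include <stdio.h>

struct group {
        pthread_mutex_t lock;           /* outer lock: protects the fields below */
        unsigned long length;
        unsigned long alloc_offset;
        unsigned long zone_unusable;
};

struct space_ctl {
        pthread_mutex_t tree_lock;      /* inner lock: protects free_space */
        unsigned long free_space;
};

/* All zoned bookkeeping is done under grp->lock; the tree lock is taken,
 * nested inside it, only for the short free-space counter update. */
static void add_free_space_zoned(struct group *grp, struct space_ctl *ctl,
                                 unsigned long size, int used)
{
        unsigned long to_free, to_unusable;
        int initial;

        pthread_mutex_lock(&grp->lock);
        initial = (size == grp->length) && (grp->alloc_offset == 0);
        to_free = (!used || initial) ? size : size / 2; /* stand-in for the real split */
        to_unusable = size - to_free;

        pthread_mutex_lock(&ctl->tree_lock);
        ctl->free_space += to_free;
        pthread_mutex_unlock(&ctl->tree_lock);

        grp->zone_unusable += to_unusable;
        pthread_mutex_unlock(&grp->lock);
}

int main(void)
{
        struct group grp = { .lock = PTHREAD_MUTEX_INITIALIZER, .length = 1UL << 20 };
        struct space_ctl ctl = { .tree_lock = PTHREAD_MUTEX_INITIALIZER };

        add_free_space_zoned(&grp, &ctl, 1UL << 20, 0);
        printf("free_space=%lu zone_unusable=%lu\n", ctl.free_space, grp.zone_unusable);
        return 0;
}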
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
@@ -4195,6 +4195,7 @@ err:
 
 	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
 	inode_inc_iversion(&inode->vfs_inode);
+	inode_set_ctime_current(&inode->vfs_inode);
 	inode_inc_iversion(&dir->vfs_inode);
 	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
 	ret = btrfs_update_inode(trans, dir);
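POSIX requires unlink() to mark the file's last status change timestamp (ctime) for update whenever the link count is decremented but does not reach zero; this hunk adds that update for the unlinked inode in btrfs. A small user-space check of the rule (file names are made up; run it in a scratch directory):

/* Hypothetical file names; checks that unlinking one of two hard links
 * bumps the remaining file's ctime, as POSIX mandates. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
        struct stat before, after;
        int fd;

        fd = open("victim", O_CREAT | O_WRONLY, 0644);
        if (fd < 0 || link("victim", "second-name") != 0)
                return 1;
        close(fd);

        stat("second-name", &before);
        sleep(1);                       /* make the timestamp change visible */
        unlink("victim");               /* link count 2 -> 1, ctime must move */
        stat("second-name", &after);

        printf("ctime %s\n",
               (after.st_ctim.tv_sec != before.st_ctim.tv_sec ||
                after.st_ctim.tv_nsec != before.st_ctim.tv_nsec) ?
               "updated" : "NOT updated");

        unlink("second-name");
        return 0;
}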
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
@@ -347,7 +347,7 @@ struct name_cache_entry {
 	int ret;
 	int need_later_update;
 	int name_len;
-	char name[];
+	char name[] __counted_by(name_len);
 };
 
 /* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
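The __counted_by(name_len) annotation tells the compiler (and the CONFIG_FORTIFY_SOURCE / CONFIG_UBSAN_BOUNDS instrumentation) that name_len holds the element count of the flexible array, so out-of-bounds accesses can be caught at run time. The usual pattern is to size the allocation with struct_size() and assign the counter before the array is touched. An illustrative kernel-style sketch, not the actual send.c allocation code:

/* Illustrative only: demo_entry/demo_entry_alloc are invented names. */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_entry {
        int name_len;
        char name[] __counted_by(name_len);
};

static struct demo_entry *demo_entry_alloc(const char *src, int len, gfp_t gfp)
{
        struct demo_entry *e;

        e = kmalloc(struct_size(e, name, len), gfp);
        if (!e)
                return NULL;

        e->name_len = len;              /* set the counter first ... */
        memcpy(e->name, src, len);      /* ... then fill the array */
        return e;
}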
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
@@ -2402,7 +2402,13 @@ static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_control *sc)
 
 	trace_btrfs_extent_map_shrinker_count(fs_info, nr);
 
-	return nr;
+	/*
+	 * Only report the real number for DEBUG builds, as there are reports of
+	 * serious performance degradation caused by too frequent shrinks.
+	 */
+	if (IS_ENABLED(CONFIG_BTRFS_DEBUG))
+		return nr;
+	return 0;
 }
 
 static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc)
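Reporting 0 cached objects means the VFS shrinker sees nothing to reclaim from the filesystem and does not call ->free_cached_objects(), so the extent map shrinker is effectively disabled on non-debug builds. IS_ENABLED(CONFIG_BTRFS_DEBUG) evaluates to a compile-time 0 or 1, so the disabled branch is still parsed and type-checked but discarded by the optimizer, which is why it is preferred over #ifdef here. A simplified user-space analog of the preprocessor trick behind it (the real macro lives in include/linux/kconfig.h and also handles =m options):

/* Simplified re-creation of the kernel's IS_ENABLED() machinery: a config
 * symbol defined to 1 expands to the token sequence "0,", which shifts the
 * 1 into the second argument slot of __take_second_arg; an undefined symbol
 * leaves junk in the first slot and the result stays 0. */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define MY_IS_ENABLED(option) __is_defined(option)

#define CONFIG_FOO 1
/* CONFIG_BAR deliberately not defined */

int main(void)
{
        /* Both branches compile; the dead one is removed by the optimizer. */
        if (MY_IS_ENABLED(CONFIG_FOO))
                printf("CONFIG_FOO branch kept\n");
        if (MY_IS_ENABLED(CONFIG_BAR))
                printf("CONFIG_BAR branch is dead code\n");

        printf("FOO=%d BAR=%d\n", MY_IS_ENABLED(CONFIG_FOO), MY_IS_ENABLED(CONFIG_BAR));
        return 0;
}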
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
@@ -1764,6 +1764,72 @@ static int check_raid_stripe_extent(const struct extent_buffer *leaf,
 	return 0;
 }
 
+static int check_dev_extent_item(const struct extent_buffer *leaf,
+				 const struct btrfs_key *key,
+				 int slot,
+				 struct btrfs_key *prev_key)
+{
+	struct btrfs_dev_extent *de;
+	const u32 sectorsize = leaf->fs_info->sectorsize;
+
+	de = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+	/* Basic fixed member checks. */
+	if (unlikely(btrfs_dev_extent_chunk_tree(leaf, de) !=
+		     BTRFS_CHUNK_TREE_OBJECTID)) {
+		generic_err(leaf, slot,
+			    "invalid dev extent chunk tree id, has %llu expect %llu",
+			    btrfs_dev_extent_chunk_tree(leaf, de),
+			    BTRFS_CHUNK_TREE_OBJECTID);
+		return -EUCLEAN;
+	}
+	if (unlikely(btrfs_dev_extent_chunk_objectid(leaf, de) !=
+		     BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
+		generic_err(leaf, slot,
+			    "invalid dev extent chunk objectid, has %llu expect %llu",
+			    btrfs_dev_extent_chunk_objectid(leaf, de),
+			    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+		return -EUCLEAN;
+	}
+	/* Alignment check. */
+	if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
+		generic_err(leaf, slot,
+			    "invalid dev extent key.offset, has %llu not aligned to %u",
+			    key->offset, sectorsize);
+		return -EUCLEAN;
+	}
+	if (unlikely(!IS_ALIGNED(btrfs_dev_extent_chunk_offset(leaf, de),
+				 sectorsize))) {
+		generic_err(leaf, slot,
+			    "invalid dev extent chunk offset, has %llu not aligned to %u",
+			    btrfs_dev_extent_chunk_objectid(leaf, de),
+			    sectorsize);
+		return -EUCLEAN;
+	}
+	if (unlikely(!IS_ALIGNED(btrfs_dev_extent_length(leaf, de),
+				 sectorsize))) {
+		generic_err(leaf, slot,
+			    "invalid dev extent length, has %llu not aligned to %u",
+			    btrfs_dev_extent_length(leaf, de), sectorsize);
+		return -EUCLEAN;
+	}
+	/* Overlap check with previous dev extent. */
+	if (slot && prev_key->objectid == key->objectid &&
+	    prev_key->type == key->type) {
+		struct btrfs_dev_extent *prev_de;
+		u64 prev_len;
+
+		prev_de = btrfs_item_ptr(leaf, slot - 1, struct btrfs_dev_extent);
+		prev_len = btrfs_dev_extent_length(leaf, prev_de);
+		if (unlikely(prev_key->offset + prev_len > key->offset)) {
+			generic_err(leaf, slot,
+				    "dev extent overlap, prev offset %llu len %llu current offset %llu",
+				    prev_key->objectid, prev_len, key->offset);
+			return -EUCLEAN;
+		}
+	}
+	return 0;
+}
+
 /*
  * Common point to switch the item-specific validation.
  */
@@ -1800,6 +1866,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
 	case BTRFS_DEV_ITEM_KEY:
 		ret = check_dev_item(leaf, key, slot);
 		break;
+	case BTRFS_DEV_EXTENT_KEY:
+		ret = check_dev_extent_item(leaf, key, slot, prev_key);
+		break;
 	case BTRFS_INODE_ITEM_KEY:
 		ret = check_inode_item(leaf, key, slot);
 		break;
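check_dev_extent_item() enforces two classes of rules on DEV_EXTENT items before a leaf is accepted: the key offset, chunk offset and length must be aligned to the filesystem sector size, and an extent must not overlap the previous one for the same device (leaf items are sorted by key, so only the immediate predecessor needs to be compared). A stand-alone user-space sketch of those two rules, with invented types and sample values, reduced to a boolean result instead of generic_err()/-EUCLEAN:

/* Invented types/values, not kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE 4096u

struct dev_extent {
        uint64_t offset;        /* key.offset: start of the extent on the device */
        uint64_t length;
};

static bool dev_extent_valid(const struct dev_extent *de,
                             const struct dev_extent *prev)
{
        if (de->offset % SECTORSIZE || de->length % SECTORSIZE)
                return false;           /* alignment check */
        if (prev && prev->offset + prev->length > de->offset)
                return false;           /* overlap with the previous extent */
        return true;
}

int main(void)
{
        struct dev_extent a = { 1048576, 262144 };
        struct dev_extent b = { 1310720, 131072 };      /* starts exactly at a's end: OK */
        struct dev_extent c = { 1376256, 4096 };        /* starts inside b: overlap */
        struct dev_extent d = { 4608, 4096 };           /* misaligned offset */

        assert(dev_extent_valid(&a, NULL));
        assert(dev_extent_valid(&b, &a));
        assert(!dev_extent_valid(&c, &b));
        assert(!dev_extent_valid(&d, NULL));
        printf("dev extent checks behave as expected\n");
        return 0;
}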