for-5.12-rc1-tag
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmA85UwACgkQxWXV+ddt
WDsdeA/8DXM6pMGaLkYcvkGvR53/vWwQlKq+i+3zuc41fYFJ7k+DQ7/K5hDbEMoM
E7YsksoRlNVruH/ZvSdtx1exQ/tNrTdqPuds/UR31lIvS2NX9OZZToGWoC8VmrNw
eS9yAwz/7JKUBA6MlMxZFv89OJoHUX9brPSeZVA8hOo3jDr5LXVm0IBskYOBUDRx
JIvt+lkJLKMXPWxwUt3hbkbFPAUQVxYYavhJhWiXT9gdxF+eRgjMI0EN43vBMN2y
kZtoZGeWR64heo9ehFzYMDlAVyph/loGovQ7m6XVzkk5DQGitg0vs3iAG46WjEXt
jxt0ZKmJQwJb3/zNPd8VlLMhULGc56jcq8uhaC2pXjhy18p7EAXml+fH51BExLYK
11hiWtWsrbTsZuYgr6fpqVFukkL/yyH/s7iCWT8Wn+AoPg2fUD99F5nkKT2T0Sso
t7MyJVlTdq8avWbTB+8kFx8+Hy1TsRz3Ic2Zpm8+F3KeVflrb31jJIp3cxPCdfUp
fWX+7VDjKVt00Ti7uP0fAaFO4hn2FjYcWzR3KOjomWox+8LVxB8PbD4H8jD7As2a
5gGGOULmkiZej7hcP6J6zvnmgZIVAGPsSGSVfZtPh4VGiycL3DozcD0x5QerLchR
NZDyIBh2KGE0cRr+cjkPxDyeqfGXQ7VUjp13CBriCkER8SOmBdw=
=QJEy
-----END PGP SIGNATURE-----

Merge tag 'for-5.12-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "This is the first batch of fixes that usually arrive during the merge
  window code freeze. Regressions and stable material.

  Regressions:

   - fix deadlock in log sync in zoned mode

   - fix bugs in subpage mode still wrongly assuming sectorsize == page size

  Fixes:

   - fix missing kunmap of the Q stripe in RAID6

   - block group fixes:
      - fix race between extent freeing/allocation when using bitmaps
      - avoid double put of block group when emptying cluster

   - swapfile fixes:
      - fix swapfile writes vs running scrub
      - fix swapfile activation vs snapshot creation

   - fix stale data exposure after cloning a hole with NO_HOLES enabled

   - remove tree-checker check that does not work in case information
     from other leaves is necessary"

* tag 'for-5.12-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: zoned: fix deadlock on log sync
  btrfs: avoid double put of block group when emptying cluster
  btrfs: fix stale data exposure after cloning a hole with NO_HOLES enabled
  btrfs: tree-checker: do not error out if extent ref hash doesn't match
  btrfs: fix race between swap file activation and snapshot creation
  btrfs: fix race between writes to swap files and scrub
  btrfs: avoid checking for RO block group twice during nocow writeback
  btrfs: fix race between extent freeing/allocation when using bitmaps
  btrfs: make check_compressed_csum() to be subpage compatible
  btrfs: make btrfs_submit_compressed_read() subpage compatible
  btrfs: fix raid6 qstripe kmap
commit c608aca57d
fs/btrfs/block-group.c

@@ -1162,6 +1162,11 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
 	spin_lock(&sinfo->lock);
 	spin_lock(&cache->lock);
 
+	if (cache->swap_extents) {
+		ret = -ETXTBSY;
+		goto out;
+	}
+
 	if (cache->ro) {
 		cache->ro++;
 		ret = 0;
@@ -2307,7 +2312,7 @@ again:
 	}
 
 	ret = inc_block_group_ro(cache, 0);
-	if (!do_chunk_alloc)
+	if (!do_chunk_alloc || ret == -ETXTBSY)
 		goto unlock_out;
 	if (!ret)
 		goto out;
@@ -2316,6 +2321,8 @@ again:
 	if (ret < 0)
 		goto out;
 	ret = inc_block_group_ro(cache, 0);
+	if (ret == -ETXTBSY)
+		goto unlock_out;
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
@@ -3406,6 +3413,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		ASSERT(list_empty(&block_group->io_list));
 		ASSERT(list_empty(&block_group->bg_list));
 		ASSERT(refcount_read(&block_group->refs) == 1);
+		ASSERT(block_group->swap_extents == 0);
 		btrfs_put_block_group(block_group);
 
 		spin_lock(&info->block_group_cache_lock);
@@ -3472,3 +3480,26 @@ void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
 		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
 	}
 }
+
+bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
+{
+	bool ret = true;
+
+	spin_lock(&bg->lock);
+	if (bg->ro)
+		ret = false;
+	else
+		bg->swap_extents++;
+	spin_unlock(&bg->lock);
+
+	return ret;
+}
+
+void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
+{
+	spin_lock(&bg->lock);
+	ASSERT(!bg->ro);
+	ASSERT(bg->swap_extents >= amount);
+	bg->swap_extents -= amount;
+	spin_unlock(&bg->lock);
+}
fs/btrfs/block-group.h

@@ -186,6 +186,12 @@ struct btrfs_block_group {
 	/* Flag indicating this block group is placed on a sequential zone */
 	bool seq_zone;
 
+	/*
+	 * Number of extents in this block group used for swap files.
+	 * All accesses protected by the spinlock 'lock'.
+	 */
+	int swap_extents;
+
 	/* Record locked full stripes for RAID5/6 block group */
 	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
 
@@ -312,4 +318,7 @@ static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
 void btrfs_freeze_block_group(struct btrfs_block_group *cache);
 void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
 
+bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg);
+void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount);
+
 #endif /* BTRFS_BLOCK_GROUP_H */
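The two helpers added above, together with the new check in inc_block_group_ro(), form a simple counter-versus-flag exclusion: a block group that still backs swap file extents refuses to go read-only (-ETXTBSY), and a read-only block group refuses new swap file extents. Below is a minimal user-space sketch of that pattern; the names (bg_try_add_swap_extent, bg_try_set_ro) and the pthread mutex standing in for the block group spinlock are illustrative, not btrfs APIs.

/* Stand-alone analogue of the swap_extents / read-only exclusion above.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct block_group {
	pthread_mutex_t lock;   /* plays the role of btrfs_block_group::lock */
	bool ro;                /* block group is read-only */
	int swap_extents;       /* extents pinned by active swap files */
};

/* Mirrors btrfs_inc_block_group_swap_extents(): fail if already RO. */
static bool bg_try_add_swap_extent(struct block_group *bg)
{
	bool ret = true;

	pthread_mutex_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		bg->swap_extents++;
	pthread_mutex_unlock(&bg->lock);
	return ret;
}

/* Mirrors the new check in inc_block_group_ro(): refuse to flip the
 * block group read-only while swap file extents are still counted. */
static bool bg_try_set_ro(struct block_group *bg)
{
	bool ret = true;

	pthread_mutex_lock(&bg->lock);
	if (bg->swap_extents)
		ret = false;          /* the kernel returns -ETXTBSY here */
	else
		bg->ro = true;
	pthread_mutex_unlock(&bg->lock);
	return ret;
}

int main(void)
{
	static struct block_group bg = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

	printf("add swap extent: %d\n", bg_try_add_swap_extent(&bg)); /* 1 */
	printf("set RO:          %d\n", bg_try_set_ro(&bg));          /* 0 */
	bg.swap_extents--;                         /* "swapoff" releases it */
	printf("set RO:          %d\n", bg_try_set_ro(&bg));          /* 1 */
	printf("add swap extent: %d\n", bg_try_add_swap_extent(&bg)); /* 0 */
	return 0;
}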
fs/btrfs/compression.c

@@ -141,6 +141,7 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
 	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
 	const u32 csum_size = fs_info->csum_size;
+	const u32 sectorsize = fs_info->sectorsize;
 	struct page *page;
 	unsigned long i;
 	char *kaddr;
@@ -154,22 +155,34 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
 	shash->tfm = fs_info->csum_shash;
 
 	for (i = 0; i < cb->nr_pages; i++) {
+		u32 pg_offset;
+		u32 bytes_left = PAGE_SIZE;
 		page = cb->compressed_pages[i];
 
-		kaddr = kmap_atomic(page);
-		crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
-		kunmap_atomic(kaddr);
+		/* Determine the remaining bytes inside the page first */
+		if (i == cb->nr_pages - 1)
+			bytes_left = cb->compressed_len - i * PAGE_SIZE;
 
-		if (memcmp(&csum, cb_sum, csum_size)) {
-			btrfs_print_data_csum_error(inode, disk_start,
-					csum, cb_sum, cb->mirror_num);
-			if (btrfs_io_bio(bio)->device)
-				btrfs_dev_stat_inc_and_print(
-					btrfs_io_bio(bio)->device,
-					BTRFS_DEV_STAT_CORRUPTION_ERRS);
-			return -EIO;
+		/* Hash through the page sector by sector */
+		for (pg_offset = 0; pg_offset < bytes_left;
+		     pg_offset += sectorsize) {
+			kaddr = kmap_atomic(page);
+			crypto_shash_digest(shash, kaddr + pg_offset,
+					    sectorsize, csum);
+			kunmap_atomic(kaddr);
+
+			if (memcmp(&csum, cb_sum, csum_size) != 0) {
+				btrfs_print_data_csum_error(inode, disk_start,
+						csum, cb_sum, cb->mirror_num);
+				if (btrfs_io_bio(bio)->device)
+					btrfs_dev_stat_inc_and_print(
+						btrfs_io_bio(bio)->device,
+						BTRFS_DEV_STAT_CORRUPTION_ERRS);
+				return -EIO;
+			}
+			cb_sum += csum_size;
+			disk_start += sectorsize;
 		}
-		cb_sum += csum_size;
 	}
 	return 0;
 }
@@ -640,7 +653,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree,
 				   page_offset(bio_first_page_all(bio)),
-				   PAGE_SIZE);
+				   fs_info->sectorsize);
 	read_unlock(&em_tree->lock);
 	if (!em)
 		return BLK_STS_IOERR;
@@ -698,19 +711,30 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	refcount_set(&cb->pending_bios, 1);
 
 	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+		u32 pg_len = PAGE_SIZE;
 		int submit = 0;
 
+		/*
+		 * To handle subpage case, we need to make sure the bio only
+		 * covers the range we need.
+		 *
+		 * If we're at the last page, truncate the length to only cover
+		 * the remaining part.
+		 */
+		if (pg_index == nr_pages - 1)
+			pg_len = min_t(u32, PAGE_SIZE,
					compressed_len - pg_index * PAGE_SIZE);
+
 		page = cb->compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		page->index = em_start >> PAGE_SHIFT;
 
 		if (comp_bio->bi_iter.bi_size)
-			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
+			submit = btrfs_bio_fits_in_stripe(page, pg_len,
							  comp_bio, 0);
 
 		page->mapping = NULL;
-		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
-		    PAGE_SIZE) {
+		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
 			unsigned int nr_sectors;
 
 			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
@@ -743,9 +767,9 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 			comp_bio->bi_private = cb;
 			comp_bio->bi_end_io = end_compressed_bio_read;
 
-			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
+			bio_add_page(comp_bio, page, pg_len, 0);
 		}
-		cur_disk_byte += PAGE_SIZE;
+		cur_disk_byte += pg_len;
 	}
 
 	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
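Both compression.c changes above replace PAGE_SIZE-based lengths with lengths derived from the btrfs sectorsize, so the code keeps working when sectorsize is smaller than the page size (subpage). A small stand-alone sketch of the segmentation the checksum loop performs — truncate the last page to the remaining compressed length, then walk it sector by sector — using illustrative constants rather than real filesystem geometry:

/* Stand-alone sketch of the per-sector walk used above; the constants
 * and function names are illustrative, not btrfs code. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  65536u   /* e.g. a 64K-page machine */
#define SECTORSIZE 4096u    /* btrfs sectorsize smaller than the page */

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Print one (page, offset, len) record per checksummed chunk. */
static void walk_compressed_extent(uint32_t compressed_len)
{
	uint32_t nr_pages = (compressed_len + PAGE_SIZE - 1) / PAGE_SIZE;

	for (uint32_t i = 0; i < nr_pages; i++) {
		/* The last page may be partially filled, like bytes_left above. */
		uint32_t bytes_left = min_u32(PAGE_SIZE,
					      compressed_len - i * PAGE_SIZE);

		for (uint32_t off = 0; off < bytes_left; off += SECTORSIZE) {
			uint32_t len = min_u32(SECTORSIZE, bytes_left - off);

			printf("page %u offset %u len %u\n", i, off, len);
		}
	}
}

int main(void)
{
	/* A 70000-byte compressed extent: one full page plus a tail. */
	walk_compressed_extent(70000);
	return 0;
}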
fs/btrfs/ctree.h

@@ -524,6 +524,11 @@ struct btrfs_swapfile_pin {
 	 * points to a struct btrfs_device.
 	 */
 	bool is_block_group;
+	/*
+	 * Only used when 'is_block_group' is true and it is the number of
+	 * extents used by a swapfile for this block group ('ptr' field).
+	 */
+	int bg_extent_count;
 };
 
 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
fs/btrfs/free-space-cache.c

@@ -2801,8 +2801,10 @@ static void __btrfs_return_cluster_to_free_space(
 	struct rb_node *node;
 
 	spin_lock(&cluster->lock);
-	if (cluster->block_group != block_group)
-		goto out;
+	if (cluster->block_group != block_group) {
+		spin_unlock(&cluster->lock);
+		return;
+	}
 
 	cluster->block_group = NULL;
 	cluster->window_start = 0;
@@ -2840,8 +2842,6 @@ static void __btrfs_return_cluster_to_free_space(
 				   entry->offset, &entry->offset_index, bitmap);
 	}
 	cluster->root = RB_ROOT;
 
-out:
 	spin_unlock(&cluster->lock);
-	btrfs_put_block_group(block_group);
 }
@@ -3125,8 +3125,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
 			entry->bytes -= bytes;
 		}
 
-		if (entry->bytes == 0)
-			rb_erase(&entry->offset_index, &cluster->root);
 		break;
 	}
 out:
@@ -3143,7 +3141,10 @@ out:
 	ctl->free_space -= bytes;
 	if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
 		ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
+
+	spin_lock(&cluster->lock);
 	if (entry->bytes == 0) {
+		rb_erase(&entry->offset_index, &cluster->root);
 		ctl->free_extents--;
 		if (entry->bitmap) {
 			kmem_cache_free(btrfs_free_space_bitmap_cachep,
@@ -3156,6 +3157,7 @@ out:
 			kmem_cache_free(btrfs_free_space_cachep, entry);
 	}
 
+	spin_unlock(&cluster->lock);
 	spin_unlock(&ctl->tree_lock);
 
 	return ret;
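The btrfs_alloc_from_cluster() change above defers unlinking and freeing an exhausted entry until both ctl->tree_lock and cluster->lock are held, so a concurrent walker of cluster->root never sees a node disappear under it. A compact user-space sketch of that lock nesting, with pthread mutexes and a plain list standing in for the rbtree (illustrative names, not btrfs code):

/* Stand-alone sketch: the entry is only unlinked and freed while the
 * cluster lock is held inside the tree-lock critical section. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	long bytes;
	struct entry *next;      /* stand-in for the rbtree linkage */
};

struct cluster {
	pthread_mutex_t lock;    /* cluster->lock */
	struct entry *root;      /* cluster->root */
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER; /* ctl->tree_lock */

/* Analogue of btrfs_alloc_from_cluster(): consume bytes first, then drop
 * the empty entry only while holding both locks. */
static long alloc_from_cluster(struct cluster *c, long bytes)
{
	struct entry *e;

	pthread_mutex_lock(&c->lock);
	e = c->root;
	if (!e || e->bytes < bytes) {
		pthread_mutex_unlock(&c->lock);
		return -1;
	}
	e->bytes -= bytes;
	pthread_mutex_unlock(&c->lock);

	/* Accounting that needs the free-space-ctl lock. */
	pthread_mutex_lock(&tree_lock);
	pthread_mutex_lock(&c->lock);
	if (e->bytes == 0) {
		c->root = e->next;   /* rb_erase() in the real code */
		free(e);             /* kmem_cache_free() in the real code */
	}
	pthread_mutex_unlock(&c->lock);
	pthread_mutex_unlock(&tree_lock);
	return bytes;
}

int main(void)
{
	static struct cluster c = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct entry *e = malloc(sizeof(*e));

	e->bytes = 8192;
	e->next = NULL;
	c.root = e;

	printf("allocated %ld\n", alloc_from_cluster(&c, 8192));
	printf("cluster empty: %s\n", c.root ? "no" : "yes");
	return 0;
}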
fs/btrfs/inode.c

@@ -1674,9 +1674,6 @@ next_slot:
 			 */
 			btrfs_release_path(path);
 
-			/* If extent is RO, we must COW it */
-			if (btrfs_extent_readonly(fs_info, disk_bytenr))
-				goto out_check;
 			ret = btrfs_cross_ref_exist(root, ino,
 						    found_key.offset -
 						    extent_offset, disk_bytenr, false);
@@ -1723,6 +1720,7 @@ next_slot:
 				WARN_ON_ONCE(freespace_inode);
 				goto out_check;
 			}
+			/* If the extent's block group is RO, we must COW */
 			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
 				goto out_check;
 			nocow = true;
@@ -10200,6 +10198,7 @@ static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
 	sp->ptr = ptr;
 	sp->inode = inode;
 	sp->is_block_group = is_block_group;
+	sp->bg_extent_count = 1;
 
 	spin_lock(&fs_info->swapfile_pins_lock);
 	p = &fs_info->swapfile_pins.rb_node;
@@ -10213,6 +10212,8 @@ static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
 			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
 			p = &(*p)->rb_right;
 		} else {
+			if (is_block_group)
+				entry->bg_extent_count++;
 			spin_unlock(&fs_info->swapfile_pins_lock);
 			kfree(sp);
 			return 1;
@@ -10238,8 +10239,11 @@ static void btrfs_free_swapfile_pins(struct inode *inode)
 		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
 		if (sp->inode == inode) {
 			rb_erase(&sp->node, &fs_info->swapfile_pins);
-			if (sp->is_block_group)
+			if (sp->is_block_group) {
+				btrfs_dec_block_group_swap_extents(sp->ptr,
+							sp->bg_extent_count);
 				btrfs_put_block_group(sp->ptr);
+			}
 			kfree(sp);
 		}
 		node = next;
@@ -10300,7 +10304,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 			       sector_t *span)
 {
 	struct inode *inode = file_inode(file);
-	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	struct extent_state *cached_state = NULL;
 	struct extent_map *em = NULL;
@@ -10351,13 +10356,27 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 	   "cannot activate swapfile while exclusive operation is running");
 		return -EBUSY;
 	}
+
+	/*
+	 * Prevent snapshot creation while we are activating the swap file.
+	 * We do not want to race with snapshot creation. If snapshot creation
+	 * already started before we bumped nr_swapfiles from 0 to 1 and
+	 * completes before the first write into the swap file after it is
+	 * activated, than that write would fallback to COW.
+	 */
+	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
+		btrfs_exclop_finish(fs_info);
+		btrfs_warn(fs_info,
+	   "cannot activate swapfile because snapshot creation is in progress");
+		return -EINVAL;
+	}
 	/*
 	 * Snapshots can create extents which require COW even if NODATACOW is
 	 * set. We use this counter to prevent snapshots. We must increment it
 	 * before walking the extents because we don't want a concurrent
 	 * snapshot to run after we've already checked the extents.
 	 */
-	atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles);
+	atomic_inc(&root->nr_swapfiles);
 
 	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
 
@@ -10454,6 +10473,17 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 			goto out;
 		}
 
+		if (!btrfs_inc_block_group_swap_extents(bg)) {
+			btrfs_warn(fs_info,
+			   "block group for swapfile at %llu is read-only%s",
+				   bg->start,
+				   atomic_read(&fs_info->scrubs_running) ?
+					       " (scrub running)" : "");
+			btrfs_put_block_group(bg);
+			ret = -EINVAL;
+			goto out;
+		}
+
 		ret = btrfs_add_swapfile_pin(inode, bg, true);
 		if (ret) {
 			btrfs_put_block_group(bg);
@@ -10492,6 +10522,8 @@ out:
 	if (ret)
 		btrfs_swap_deactivate(file);
 
+	btrfs_drew_write_unlock(&root->snapshot_lock);
+
 	btrfs_exclop_finish(fs_info);
 
 	if (ret)
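btrfs_add_swapfile_pin() above keeps one rb-tree node per (block group, inode) pair and now counts how many swap file extents that node covers, so btrfs_free_swapfile_pins() can later hand the exact count to btrfs_dec_block_group_swap_extents(). A stand-alone sketch of that "insert once, bump a counter on duplicates" pattern, using a plain array instead of an rbtree and illustrative names:

/* Stand-alone sketch of the swapfile-pin counting above; not btrfs APIs. */
#include <stdio.h>

struct swap_pin {
	unsigned long ptr;     /* block group key */
	unsigned long inode;   /* owning swap file */
	int bg_extent_count;   /* extents of that file in that block group */
};

static struct swap_pin pins[16];
static int nr_pins;

/* Returns 1 if an existing pin was found and its count bumped,
 * 0 if a new pin was inserted (mirrors the 0/1 return above). */
static int add_swapfile_pin(unsigned long ptr, unsigned long inode)
{
	for (int i = 0; i < nr_pins; i++) {
		if (pins[i].ptr == ptr && pins[i].inode == inode) {
			pins[i].bg_extent_count++;
			return 1;
		}
	}
	pins[nr_pins].ptr = ptr;
	pins[nr_pins].inode = inode;
	pins[nr_pins].bg_extent_count = 1;
	nr_pins++;
	return 0;
}

/* On swapoff, each pin releases all the extents it accumulated at once. */
static void free_swapfile_pins(unsigned long inode)
{
	for (int i = 0; i < nr_pins; i++) {
		if (pins[i].inode != inode)
			continue;
		printf("dec swap extents for bg %lu by %d\n",
		       pins[i].ptr, pins[i].bg_extent_count);
		pins[i] = pins[--nr_pins];
		i--;
	}
}

int main(void)
{
	/* Three extents of the same swap file land in block group 100,
	 * one extent lands in block group 200. */
	add_swapfile_pin(100, 1);
	add_swapfile_pin(100, 1);
	add_swapfile_pin(100, 1);
	add_swapfile_pin(200, 1);
	free_swapfile_pins(1);   /* prints 3 for bg 100 and 1 for bg 200 */
	return 0;
}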
fs/btrfs/raid56.c

@@ -2359,16 +2359,21 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 	SetPageUptodate(p_page);
 
 	if (has_qstripe) {
+		/* RAID6, allocate and map temp space for the Q stripe */
 		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
 		if (!q_page) {
 			__free_page(p_page);
 			goto cleanup;
 		}
 		SetPageUptodate(q_page);
+		pointers[rbio->real_stripes - 1] = kmap(q_page);
 	}
 
 	atomic_set(&rbio->error, 0);
 
+	/* Map the parity stripe just once */
+	pointers[nr_data] = kmap(p_page);
+
 	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
 		struct page *p;
 		void *parity;
@@ -2378,16 +2383,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 			pointers[stripe] = kmap(p);
 		}
 
-		/* then add the parity stripe */
-		pointers[stripe++] = kmap(p_page);
-
 		if (has_qstripe) {
-			/*
-			 * raid6, add the qstripe and call the
-			 * library function to fill in our p/q
-			 */
-			pointers[stripe++] = kmap(q_page);
-
+			/* RAID6, call the library function to fill in our P/Q */
 			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
 						pointers);
 		} else {
@@ -2408,12 +2405,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 
 		for (stripe = 0; stripe < nr_data; stripe++)
 			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
-		kunmap(p_page);
 	}
 
+	kunmap(p_page);
 	__free_page(p_page);
-	if (q_page)
+	if (q_page) {
+		kunmap(q_page);
 		__free_page(q_page);
+	}
 
 writeback:
 	/*
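The raid56.c fix above is about keeping kmap() and kunmap() balanced for the P and Q pages, but the surrounding code is easier to follow with a picture of what raid6_call.gen_syndrome() computes into pointers[nr_data] (P) and the last slot (Q): P is the XOR of the data stripes and Q is a weighted sum over GF(2^8). A self-contained user-space sketch of that computation (a simplified analogue, not the kernel's lib/raid6 code):

/* Stand-alone sketch of RAID6 P/Q syndrome generation over GF(2^8). */
#include <stdint.h>
#include <stdio.h>

#define NR_DATA    3
#define STRIPE_LEN 8

/* Multiply by x (i.e. by 2) in GF(2^8) with the RAID6 polynomial 0x11d. */
static uint8_t gf_mul2(uint8_t a)
{
	return (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1d : 0));
}

/* P = D0 ^ D1 ^ D2; Q = D0 ^ 2*D1 ^ 4*D2 (Horner, highest disk first). */
static void gen_syndrome(uint8_t data[NR_DATA][STRIPE_LEN],
			 uint8_t *p, uint8_t *q)
{
	for (int b = 0; b < STRIPE_LEN; b++) {
		uint8_t wp = data[NR_DATA - 1][b];
		uint8_t wq = wp;

		for (int d = NR_DATA - 2; d >= 0; d--) {
			wp ^= data[d][b];
			wq = gf_mul2(wq) ^ data[d][b];
		}
		p[b] = wp;
		q[b] = wq;
	}
}

int main(void)
{
	uint8_t data[NR_DATA][STRIPE_LEN] = {
		"data-00", "data-01", "data-02"
	};
	uint8_t p[STRIPE_LEN], q[STRIPE_LEN];

	gen_syndrome(data, p, q);
	for (int b = 0; b < STRIPE_LEN; b++)
		printf("%d: P=%02x Q=%02x\n", b, p[b], q[b]);
	return 0;
}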
fs/btrfs/reflink.c

@@ -553,6 +553,24 @@ process_slot:
 			 */
 			btrfs_release_path(path);
 
+			/*
+			 * When using NO_HOLES and we are cloning a range that covers
+			 * only a hole (no extents) into a range beyond the current
+			 * i_size, punching a hole in the target range will not create
+			 * an extent map defining a hole, because the range starts at or
+			 * beyond current i_size. If the file previously had an i_size
+			 * greater than the new i_size set by this clone operation, we
+			 * need to make sure the next fsync is a full fsync, so that it
+			 * detects and logs a hole covering a range from the current
+			 * i_size to the new i_size. If the clone range covers extents,
+			 * besides a hole, then we know the full sync flag was already
+			 * set by previous calls to btrfs_replace_file_extents() that
+			 * replaced file extent items.
+			 */
+			if (last_dest_end >= i_size_read(inode))
+				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+					&BTRFS_I(inode)->runtime_flags);
+
 			ret = btrfs_replace_file_extents(inode, path, last_dest_end,
 					destoff + len - 1, NULL, &trans);
 			if (ret)
fs/btrfs/scrub.c

@@ -3767,6 +3767,13 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 			 * commit_transactions.
 			 */
 			ro_set = 0;
+		} else if (ret == -ETXTBSY) {
+			btrfs_warn(fs_info,
+		   "skipping scrub of block group %llu due to active swapfile",
+				   cache->start);
+			scrub_pause_off(fs_info);
+			ret = 0;
+			goto skip_unfreeze;
 		} else {
 			btrfs_warn(fs_info,
 				   "failed setting block group ro: %d", ret);
@@ -3862,7 +3869,7 @@ done:
 		} else {
 			spin_unlock(&cache->lock);
 		}
-
+skip_unfreeze:
 		btrfs_unfreeze_block_group(cache);
 		btrfs_put_block_group(cache);
 		if (ret)
fs/btrfs/tree-checker.c

@@ -1453,22 +1453,14 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
 		return -EUCLEAN;
 	}
 	for (; ptr < end; ptr += sizeof(*dref)) {
-		u64 root_objectid;
-		u64 owner;
 		u64 offset;
-		u64 hash;
 
+		/*
+		 * We cannot check the extent_data_ref hash due to possible
+		 * overflow from the leaf due to hash collisions.
+		 */
 		dref = (struct btrfs_extent_data_ref *)ptr;
-		root_objectid = btrfs_extent_data_ref_root(leaf, dref);
-		owner = btrfs_extent_data_ref_objectid(leaf, dref);
 		offset = btrfs_extent_data_ref_offset(leaf, dref);
-		hash = hash_extent_data_ref(root_objectid, owner, offset);
-		if (unlikely(hash != key->offset)) {
-			extent_err(leaf, slot,
-	"invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
-				   hash, key->offset);
-			return -EUCLEAN;
-		}
 		if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
 			extent_err(leaf, slot,
 	"invalid extent data backref offset, have %llu expect aligned to %u",
fs/btrfs/tree-log.c

@@ -3174,16 +3174,13 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 	root_log_ctx.log_transid = log_root_tree->log_transid;
 
 	if (btrfs_is_zoned(fs_info)) {
-		mutex_lock(&fs_info->tree_root->log_mutex);
 		if (!log_root_tree->node) {
 			ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
 			if (ret) {
-				mutex_unlock(&fs_info->tree_log_mutex);
 				mutex_unlock(&log_root_tree->log_mutex);
 				goto out;
 			}
 		}
-		mutex_unlock(&fs_info->tree_root->log_mutex);
 	}
 
 	/*