// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <asm/unaligned.h>

#include "messages.h"
#include "extent_io.h"
#include "fs.h"
#include "accessors.h"
static bool check_setget_bounds(const struct extent_buffer *eb,
				const void *ptr, unsigned off, int size)
{
	const unsigned long member_offset = (unsigned long)ptr + off;

	if (unlikely(member_offset + size > eb->len)) {
		btrfs_warn(eb->fs_info,
		"bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d",
			(member_offset > eb->len ? "start" : "end"),
			(unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}

	return true;
}
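
/*
 * Initialize a map token for repeated member access: the cached address
 * starts at the beginning of the extent buffer's first folio.
 */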
void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb)
{
	token->eb = eb;
	token->kaddr = folio_address(eb->folios[0]);
	token->offset = 0;
}

/*
 * Macro templates that define helpers to read/write extent buffer data of a
 * given size, that are also used via ctree.h for access to item members by
 * specialized helpers.
 *
 * Generic helpers:
 * - btrfs_set_8 (for 8/16/32/64)
 * - btrfs_get_8 (for 8/16/32/64)
 *
 * Generic helpers with a token (cached address of the most recently accessed
 * page):
 * - btrfs_set_token_8 (for 8/16/32/64)
 * - btrfs_get_token_8 (for 8/16/32/64)
 *
 * The set/get functions handle data spanning two pages transparently, in case
 * metadata block size is larger than page. Every pointer to metadata items is
 * an offset into the extent buffer page array, cast to a specific type. This
 * gives us all the type checking.
 *
 * The extent buffer pages stored in the array folios may not form a contiguous
 * physical range, but the API functions assume the linear offset to the range
 * from 0 to metadata node size.
 */
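
/*
 * Purely illustrative sketch of how the generated helpers are called (the
 * variable names are made up, the functions themselves are defined below):
 *
 *	u64 val;
 *
 *	val = btrfs_get_64(eb, ptr, off);
 *	btrfs_set_64(eb, ptr, off, val + 1);
 *
 * or, caching the mapped folio across several accesses to the same buffer:
 *
 *	struct btrfs_map_token token;
 *
 *	btrfs_init_map_token(&token, eb);
 *	val = btrfs_get_token_64(&token, ptr, off);
 */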

#define DEFINE_BTRFS_SETGET_BITS(bits)					\
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token,		\
			       const void *ptr, unsigned long off)	\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \
	const unsigned long oil = get_eb_offset_in_folio(token->eb,	\
							 member_offset);\
	const int unit_size = token->eb->folio_size;			\
	const int unit_shift = token->eb->folio_shift;			\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = unit_size - oil;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));	\
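	/* Fast path: the token's cached range already covers this member. */ \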
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + unit_size) {	\
		return get_unaligned_le##bits(token->kaddr + oil);	\
	}								\
	token->kaddr = folio_address(token->eb->folios[idx]);		\
	token->offset = idx << unit_shift;				\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \
		return get_unaligned_le##bits(token->kaddr + oil);	\
									\
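	/* The member straddles two folios, assemble it from both parts. */ \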
	memcpy(lebytes, token->kaddr + oil, part);			\
	token->kaddr = folio_address(token->eb->folios[idx + 1]);	\
	token->offset = (idx + 1) << unit_shift;			\
	memcpy(lebytes + part, token->kaddr, size - part);		\
	return get_unaligned_le##bits(lebytes);				\
}									\
u##bits btrfs_get_##bits(const struct extent_buffer *eb,		\
			 const void *ptr, unsigned long off)		\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_folio_index(eb, member_offset);\
	const unsigned long oil = get_eb_offset_in_folio(eb,		\
							 member_offset);\
	const int unit_size = eb->folio_size;				\
	char *kaddr = folio_address(eb->folios[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = unit_size - oil;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oil + size <= unit_size) \
		return get_unaligned_le##bits(kaddr + oil);		\
									\
	memcpy(lebytes, kaddr + oil, part);				\
	kaddr = folio_address(eb->folios[idx + 1]);			\
	memcpy(lebytes + part, kaddr, size - part);			\
	return get_unaligned_le##bits(lebytes);				\
}									\
void btrfs_set_token_##bits(struct btrfs_map_token *token,		\
			    const void *ptr, unsigned long off,		\
			    u##bits val)				\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_folio_index(token->eb, member_offset); \
	const unsigned long oil = get_eb_offset_in_folio(token->eb,	\
							 member_offset);\
	const int unit_size = token->eb->folio_size;			\
	const int unit_shift = token->eb->folio_shift;			\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = unit_size - oil;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + unit_size) {	\
		put_unaligned_le##bits(val, token->kaddr + oil);	\
		return;							\
	}								\
	token->kaddr = folio_address(token->eb->folios[idx]);		\
	token->offset = idx << unit_shift;				\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 ||				\
	    oil + size <= unit_size) {					\
		put_unaligned_le##bits(val, token->kaddr + oil);	\
		return;							\
	}								\
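	/* The write crosses a folio boundary, split the bytes across both. */ \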
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(token->kaddr + oil, lebytes, part);			\
	token->kaddr = folio_address(token->eb->folios[idx + 1]);	\
	token->offset = (idx + 1) << unit_shift;			\
	memcpy(token->kaddr, lebytes + part, size - part);		\
}									\
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr,	\
		      unsigned long off, u##bits val)			\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_folio_index(eb, member_offset);\
	const unsigned long oil = get_eb_offset_in_folio(eb,		\
							 member_offset);\
	const int unit_size = eb->folio_size;				\
	char *kaddr = folio_address(eb->folios[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = unit_size - oil;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 ||				\
	    oil + size <= unit_size) {					\
		put_unaligned_le##bits(val, kaddr + oil);		\
		return;							\
	}								\
									\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(kaddr + oil, lebytes, part);				\
	kaddr = folio_address(eb->folios[idx + 1]);			\
	memcpy(kaddr, lebytes + part, size - part);			\
}
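
/* Instantiate the get/set helpers for all supported member widths. */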
DEFINE_BTRFS_SETGET_BITS(8)
DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)
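
/*
 * Read the key of the node pointer at slot @nr of @eb into @disk_key.
 */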
void btrfs_node_key(const struct extent_buffer *eb,
		    struct btrfs_disk_key *disk_key, int nr)
{
	unsigned long ptr = btrfs_node_key_ptr_offset(eb, nr);
	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
		       struct btrfs_key_ptr, key, disk_key);
}