net: attempt a single high order allocation
In commit ed98df3361 ("net: use __GFP_NORETRY for high order
allocations") we tried to address one issue caused by order-3
allocations.
We still observe high latencies and system overhead in situations where
compaction is not successful.
Instead of trying order-3, order-2, and order-1 allocations, do a
single order-3 best effort and immediately fall back to plain order-0.

This mimics the slub strategy of falling back to the minimum slab
order when the high order allocation attempted for performance fails.

Order-3 allocations give a performance boost only if they can be
satisfied without recurring, expensive memory scans.
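As a rough illustration of that pattern (a minimal sketch, not code
from the patch; frag_alloc_once() and got_order are hypothetical
names), the shape is one __GFP_NORETRY high order attempt followed by
an immediate order-0 fallback:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper sketching the patch's allocation strategy. */
static struct page *frag_alloc_once(gfp_t gfp, unsigned int order,
                                    unsigned int *got_order)
{
        struct page *page;

        if (order) {
                /* Single best effort: __GFP_NORETRY avoids the
                 * repeated compaction/reclaim passes that cause the
                 * latencies described above.
                 */
                page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN |
                                   __GFP_NORETRY, order);
                if (page) {
                        *got_order = order;
                        return page;
                }
        }
        /* Fall back to a plain order-0 page right away, mirroring
         * slub's fallback to its minimum slab order.
         */
        page = alloc_page(gfp);
        if (page)
                *got_order = 0;
        return page;
}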
Quoting David:
The page allocator relies on synchronous (sync light) memory compaction
after direct reclaim for allocations that don't retry and deferred
compaction doesn't work with this strategy because the allocation order
is always decreasing from the previous failed attempt.
This means sync light compaction will always be encountered if memory
cannot be defragmented or reclaimed several times during the
skb_page_frag_refill() iteration.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit d9b2938aab
parent bcc735473c
net/core/sock.c

@@ -1822,6 +1822,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                                            order);
                         if (page)
                                 goto fill_page;
+                        /* Do not retry other high order allocations */
+                        order = 1;
+                        max_page_order = 0;
                 }
                 order--;
         }
@@ -1869,10 +1872,8 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
  * no guarantee that allocations succeed. Therefore, @sz MUST be
  * less or equal than PAGE_SIZE.
  */
-bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
 {
-        int order;
-
         if (pfrag->page) {
                 if (atomic_read(&pfrag->page->_count) == 1) {
                         pfrag->offset = 0;
@@ -1883,20 +1884,21 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
                 put_page(pfrag->page);
         }
 
-        order = SKB_FRAG_PAGE_ORDER;
-        do {
-                gfp_t gfp = prio;
-
-                if (order)
-                        gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
-                pfrag->page = alloc_pages(gfp, order);
+        pfrag->offset = 0;
+        if (SKB_FRAG_PAGE_ORDER) {
+                pfrag->page = alloc_pages(gfp | __GFP_COMP |
+                                          __GFP_NOWARN | __GFP_NORETRY,
+                                          SKB_FRAG_PAGE_ORDER);
                 if (likely(pfrag->page)) {
-                        pfrag->offset = 0;
-                        pfrag->size = PAGE_SIZE << order;
+                        pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
                         return true;
                 }
-        } while (--order >= 0);
+        }
+        pfrag->page = alloc_page(gfp);
+        if (likely(pfrag->page)) {
+                pfrag->size = PAGE_SIZE;
+                return true;
+        }
         return false;
 }
 EXPORT_SYMBOL(skb_page_frag_refill);
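For context on the rewritten helper, a minimal caller sketch is shown
below. It assumes bytes_needed <= PAGE_SIZE, as the comment above the
function requires; copy_into_frag(), bytes_needed, and data are
illustrative names, not taken from this patch:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Hypothetical caller: refill the frag, then copy into the free
 * region and advance the offset so later calls can reuse the page.
 */
static int copy_into_frag(struct page_frag *frag, const void *data,
                          unsigned int bytes_needed)
{
        unsigned int copied;

        if (!skb_page_frag_refill(bytes_needed, frag, GFP_KERNEL))
                return -ENOMEM; /* even the order-0 fallback failed */

        /* frag->size - frag->offset bytes are usable at frag->offset. */
        copied = min_t(unsigned int, bytes_needed,
                       frag->size - frag->offset);
        memcpy(page_address(frag->page) + frag->offset, data, copied);
        frag->offset += copied;
        return copied;
}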