dma-mapping fixes for Linux 6.5

- swiotlb area sizing fixes (Petr Tesarik)

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmSq57sLHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYOh0xAAkwklIxxzXvNlgjvy2hdgPWImPS8tGPDSIsqA9TDD
WDZq89nz/ndnchdPObDvyJXmfBgqa0qCHqopBVPqMKv5a1pKZhrRXYlbajGQQwji
MIqefTLZ/VGw7bDpEivOt+yadwQ3KxuxWWs7/JPKLLReSJ22H8P+jkrK7P7kBL5X
YaMtZG9d86fvFHnQHKdAOlF1iCvnoZDHPcvaZbI6m5mMSZ+HIYqK5pP1MTUEAbIU
MX4ZSI7/mL0q67+kZuM/NCSeq1pH0Cd0D2DGm+8k/y87G81GS6E5Wgg+xO7vYiXf
5ChuwlAO9K9KhH7NIRkKhkad/Ii89ENXSyv5gmPRoKYK5FXajnHSlJTUrZojV6XC
Pbsd9ATVzV0rY61EPyh6G1a+Ttp/pwMp+W0I2fi032GVAePQ/vhB9x9O+2O/3QiC
v80nUSatkciZncWqkguhp3NONsRmLKep3CCQnEAA/gLs27B0avdQeslnqbOOUQKd
Si+djIoel8ONjQ+mW8eFCsVYMH1xFSo0aGsgGe0y2cyBE3DN1AW9eRnOXWT4C1PR
UyDlx8ACs87ojec+YRQFYk2/PbsU7CQiH1pteXvBHcbhiVUAvrtXtg6ANQ+7066P
IIduAZmlHcHb1BhyrSQbAtRllVLIp/l9IAkCSY9SvL0tjh/B5CaRBD5m2Taow5I/
KUI=
=4Lfc
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-6.5-2023-07-09' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - swiotlb area sizing fixes (Petr Tesarik)

* tag 'dma-mapping-6.5-2023-07-09' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: reduce the number of areas to match actual memory pool size
  swiotlb: always set the number of areas before allocating the pool
commit f71f64210d
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -115,9 +115,16 @@ static bool round_up_default_nslabs(void)
 	return true;
 }
 
+/**
+ * swiotlb_adjust_nareas() - adjust the number of areas and slots
+ * @nareas: Desired number of areas. Zero is treated as 1.
+ *
+ * Adjust the default number of areas in a memory pool.
+ * The default size of the memory pool may also change to meet minimum area
+ * size requirements.
+ */
 static void swiotlb_adjust_nareas(unsigned int nareas)
 {
-	/* use a single area when non is specified */
 	if (!nareas)
 		nareas = 1;
 	else if (!is_power_of_2(nareas))
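
A minimal userspace sketch of the normalization the new kernel-doc describes:
zero areas fall back to one, and a non-power-of-two count is rounded up. The
rounding step lies beyond this hunk and is assumed from the surrounding
function body (a roundup_pow_of_two() call); the helper below only models it.

	#include <stdio.h>

	static unsigned int roundup_pow_of_two_model(unsigned int n)
	{
		unsigned int p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	static unsigned int adjust_nareas_model(unsigned int nareas)
	{
		if (!nareas)				/* zero is treated as 1 */
			nareas = 1;
		else if (nareas & (nareas - 1))		/* !is_power_of_2() */
			nareas = roundup_pow_of_two_model(nareas);
		return nareas;
	}

	int main(void)
	{
		/* prints: 1 8 8 */
		printf("%u %u %u\n", adjust_nareas_model(0),
		       adjust_nareas_model(6), adjust_nareas_model(8));
		return 0;
	}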
@@ -131,6 +138,23 @@ static void swiotlb_adjust_nareas(unsigned int nareas)
 		(default_nslabs << IO_TLB_SHIFT) >> 20);
 }
 
+/**
+ * limit_nareas() - get the maximum number of areas for a given memory pool size
+ * @nareas: Desired number of areas.
+ * @nslots: Total number of slots in the memory pool.
+ *
+ * Limit the number of areas to the maximum possible number of areas in
+ * a memory pool of the given size.
+ *
+ * Return: Maximum possible number of areas.
+ */
+static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
+{
+	if (nslots < nareas * IO_TLB_SEGSIZE)
+		return nslots / IO_TLB_SEGSIZE;
+	return nareas;
+}
+
 static int __init
 setup_io_tlb_npages(char *str)
 {
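
A worked example of the new clamp, taking IO_TLB_SEGSIZE as 128 per
include/linux/swiotlb.h at the time of this merge: each area must cover at
least one full segment of slots, so a small pool caps the usable area count.
A self-contained sketch, not kernel code:

	#include <stdio.h>

	#define IO_TLB_SEGSIZE	128

	static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
	{
		if (nslots < nareas * IO_TLB_SEGSIZE)
			return nslots / IO_TLB_SEGSIZE;
		return nareas;
	}

	int main(void)
	{
		/* 512 slots back at most 512 / 128 = 4 areas: 32 is clamped. */
		printf("%u\n", limit_nareas(32, 512));		/* prints: 4 */
		/* a large pool leaves the requested count untouched */
		printf("%u\n", limit_nareas(32, 1UL << 20));	/* prints: 32 */
		return 0;
	}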
@@ -290,6 +314,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs;
+	unsigned int nareas;
 	size_t alloc_size;
 	void *tlb;
 
@@ -298,18 +323,16 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	if (swiotlb_force_disable)
 		return;
 
-	/*
-	 * default_nslabs maybe changed when adjust area number.
-	 * So allocate bounce buffer after adjusting area number.
-	 */
 	if (!default_nareas)
 		swiotlb_adjust_nareas(num_possible_cpus());
 
 	nslabs = default_nslabs;
+	nareas = limit_nareas(default_nareas, nslabs);
 	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
 		if (nslabs <= IO_TLB_MIN_SLABS)
 			return;
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
+		nareas = limit_nareas(nareas, nslabs);
 	}
 
 	if (default_nslabs != nslabs) {
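
The loop above halves the pool on each failed memblock allocation, and the
added limit_nareas() call keeps the area count in step with the shrinking
pool. A sketch of that interaction with the allocation stubbed out and a
hypothetical 2048-slab starting size (IO_TLB_SEGSIZE again assumed 128):

	#include <stdio.h>

	#define IO_TLB_SEGSIZE	128
	/* round x up to a multiple of a, as the kernel's ALIGN() does */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

	static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
	{
		if (nslots < nareas * IO_TLB_SEGSIZE)
			return nslots / IO_TLB_SEGSIZE;
		return nareas;
	}

	int main(void)
	{
		unsigned long nslabs = 2048;
		unsigned int nareas = limit_nareas(16, nslabs);

		/* pretend two consecutive allocations fail */
		for (int failed = 0; failed < 2; failed++) {
			nslabs = ALIGN_UP(nslabs >> 1, IO_TLB_SEGSIZE);
			nareas = limit_nareas(nareas, nslabs);
			printf("nslabs=%lu nareas=%u\n", nslabs, nareas);
		}
		/* prints: nslabs=1024 nareas=8, then nslabs=512 nareas=4 */
		return 0;
	}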
@@ -355,6 +378,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 {
 	struct io_tlb_mem *mem = &io_tlb_default_mem;
 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
+	unsigned int nareas;
 	unsigned char *vstart = NULL;
 	unsigned int order, area_order;
 	bool retried = false;
@@ -363,6 +387,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 	if (swiotlb_force_disable)
 		return 0;
 
+	if (!default_nareas)
+		swiotlb_adjust_nareas(num_possible_cpus());
+
 retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
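
Moving the swiotlb_adjust_nareas() call ahead of the retry loop matters
because, per its kernel-doc, the adjustment may also grow default_nslabs to
meet minimum area size requirements. The growth rule itself lives in the
elided part of the function, so the model below is an assumption: it only
illustrates why the pool must be sized after the adjustment, not before.

	#include <stdio.h>

	#define IO_TLB_SEGSIZE	128

	static unsigned long default_nslabs = 1024;

	/* assumed rule: at least one full segment of slots per area */
	static void adjust_nareas_model(unsigned int nareas)
	{
		if (default_nslabs < (unsigned long)nareas * IO_TLB_SEGSIZE)
			default_nslabs = (unsigned long)nareas * IO_TLB_SEGSIZE;
	}

	int main(void)
	{
		adjust_nareas_model(16);	/* e.g. 16 possible CPUs */

		/* Allocating before this adjustment would have sized the
		 * pool for 1024 slabs when 2048 are now expected. */
		printf("default_nslabs=%lu\n", default_nslabs);	/* 2048 */
		return 0;
	}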
@@ -397,11 +424,8 @@ retry:
 			(PAGE_SIZE << order) >> 20);
 	}
 
-	if (!default_nareas)
-		swiotlb_adjust_nareas(num_possible_cpus());
-
-	area_order = get_order(array_size(sizeof(*mem->areas),
-		default_nareas));
+	nareas = limit_nareas(default_nareas, nslabs);
+	area_order = get_order(array_size(sizeof(*mem->areas), nareas));
 	mem->areas = (struct io_tlb_area *)
 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
 	if (!mem->areas)
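
With the pool possibly halved by the retry loop, sizing mem->areas from the
clamped nareas rather than default_nareas keeps the array allocation in step
with the pool. A sketch of the arithmetic, with a placeholder 16-byte area
struct and a modeled get_order() for 4 KiB pages (neither is the kernel's
actual definition):

	#include <stdio.h>

	#define IO_TLB_SEGSIZE	128
	#define AREA_BYTES	16	/* placeholder for sizeof(struct io_tlb_area) */

	static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
	{
		if (nslots < nareas * IO_TLB_SEGSIZE)
			return nslots / IO_TLB_SEGSIZE;
		return nareas;
	}

	/* smallest order such that (4096 << order) >= size */
	static unsigned int get_order_model(unsigned long size)
	{
		unsigned int order = 0;

		while ((4096UL << order) < size)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned long nslabs = 512;	/* pool after a halving */
		unsigned int nareas = limit_nareas(32, nslabs);

		/* 4 areas * 16 bytes = 64 bytes, order 0: one page suffices,
		 * versus sizing for all 32 default areas. */
		printf("nareas=%u order=%u\n", nareas,
		       get_order_model((unsigned long)nareas * AREA_BYTES));
		return 0;
	}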
@@ -415,7 +439,7 @@ retry:
 	set_memory_decrypted((unsigned long)vstart,
 		(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
-			default_nareas);
+			nareas);
 
 	swiotlb_print_info();
 	return 0;