sparc64: Add 16GB hugepage support
Adds support for the 16GB hugepage size. To use this page size, boot with kernel parameters such as:

    default_hugepagesz=16G hugepagesz=16G hugepages=10

Testing: Tested with the stream benchmark, which allocates 48G of arrays backed by 16G hugepages and performs read/write operations on them in parallel.

Orabug: 25362942

Cc: Anthony Yznaga <anthony.yznaga@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
committed by David S. Miller
parent 44382b0195
commit df7b2155bb
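Usage note (not part of the patch): once the kernel is booted with the parameters quoted above, a process obtains 16GB-backed memory through the usual hugetlb interfaces. A minimal sketch, assuming default_hugepagesz=16G so that plain MAP_HUGETLB picks the 16G size and that enough pages were reserved at boot:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define HUGE_16G	(16UL << 30)

int main(void)
{
	/* MAP_HUGETLB without an explicit size flag uses the default
	 * hugepage size, i.e. 16G when booted with default_hugepagesz=16G. */
	void *p = mmap(NULL, HUGE_16G, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	memset(p, 0, HUGE_16G);		/* touch the whole mapping */
	munmap(p, HUGE_16G);
	return EXIT_SUCCESS;
}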
@@ -348,6 +348,18 @@ static int __init hugetlbpage_init(void)
 
 arch_initcall(hugetlbpage_init);
 
+static void __init pud_huge_patch(void)
+{
+	struct pud_huge_patch_entry *p;
+	unsigned long addr;
+
+	p = &__pud_huge_patch;
+	addr = p->addr;
+	*(unsigned int *)addr = p->insn;
+
+	__asm__ __volatile__("flush %0" : : "r" (addr));
+}
+
 static int __init setup_hugepagesz(char *string)
 {
 	unsigned long long hugepage_size;
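Note: pud_huge_patch() relies on a patch-table entry type and the __pud_huge_patch symbol that are defined outside this hunk. Inferred only from the uses above, a sketch of the assumed shape (field widths are an assumption of this note):

/* Assumed layout: one entry names the kernel-text location to patch and
 * the replacement instruction word to store there.  The "flush" issued by
 * pud_huge_patch() then keeps the instruction cache coherent with the
 * patched text. */
struct pud_huge_patch_entry {
	unsigned int	addr;	/* address of the instruction to patch (assumed width) */
	unsigned int	insn;	/* replacement instruction encoding */
};
extern struct pud_huge_patch_entry __pud_huge_patch;	/* single patch site (assumed) */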
@@ -360,6 +372,11 @@ static int __init setup_hugepagesz(char *string)
 	hugepage_shift = ilog2(hugepage_size);
 
 	switch (hugepage_shift) {
+	case HPAGE_16GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_16GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_16GB;
+		pud_huge_patch();
+		break;
 	case HPAGE_2GB_SHIFT:
 		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
 		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
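Note: the new case is reached because hugepagesz=16G parses to 1 << 34, so ilog2() of the size is 34; that HPAGE_16GB_SHIFT equals 34 is an assumption of this note, not shown in the hunk. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long hugepage_size = 16ULL << 30;	/* hugepagesz=16G */
	unsigned int shift = 0;

	/* minimal ilog2() equivalent for a power-of-two size */
	while ((hugepage_size >> shift) > 1)
		shift++;

	printf("shift = %u\n", shift);	/* prints 34, the 16GB case above */
	return 0;
}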
@@ -400,6 +417,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 {
 	struct mm_struct *mm;
 	unsigned long flags;
+	bool is_huge_tsb;
 	pte_t pte = *ptep;
 
 	if (tlb_type != hypervisor) {
@@ -417,15 +435,37 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
 
 	spin_lock_irqsave(&mm->context.lock, flags);
 
+	is_huge_tsb = false;
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
-	    is_hugetlb_pmd(__pmd(pte_val(pte)))) {
-		/* We are fabricating 8MB pages using 4MB real hw pages. */
-		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
-		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
-					address, pte_val(pte));
-	} else
+	if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
+		unsigned long hugepage_size = PAGE_SIZE;
+
+		if (is_vm_hugetlb_page(vma))
+			hugepage_size = huge_page_size(hstate_vma(vma));
+
+		if (hugepage_size >= PUD_SIZE) {
+			unsigned long mask = 0x1ffc00000UL;
+
+			/* Transfer bits [32:22] from address to resolve
+			 * at 4M granularity.
+			 */
+			pte_val(pte) &= ~mask;
+			pte_val(pte) |= (address & mask);
+		} else if (hugepage_size >= PMD_SIZE) {
+			/* We are fabricating 8MB pages using 4MB
+			 * real hw pages.
+			 */
+			pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
+		}
+
+		if (hugepage_size >= PMD_SIZE) {
+			__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
+				REAL_HPAGE_SHIFT, address, pte_val(pte));
+			is_huge_tsb = true;
+		}
+	}
 #endif
-	__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
-				address, pte_val(pte));
+	if (!is_huge_tsb)
+		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+					address, pte_val(pte));
 
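Note: for PUD-sized (16GB) mappings, the hunk above transfers bits [32:22] of the faulting address into the PTE so the page is still resolved by 4MB-granularity TSB entries. The standalone check below only recomputes the 0x1ffc00000UL constant from that documented bit range; it is a consistency note, not kernel code.

#include <stdio.h>

int main(void)
{
	unsigned int lo = 22, hi = 32;	/* bits [32:22], per the comment above */
	unsigned long mask = ((1UL << (hi - lo + 1)) - 1) << lo;

	printf("mask = %#lx\n", mask);	/* prints 0x1ffc00000 */
	return 0;
}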