linux/arch/sparc/include/asm/mmu_64.h
David S. Miller 37b3a8ff3e sparc64: Move from 4MB to 8MB huge pages.
The impetus for this is that we would like to move to 64-bit PMDs and
PGDs, but that would result in only supporting a 42-bit address space
with the current page table layout.  It'd be nice to support at least
43 bits.

The reason we'd end up with only 42 bits after making PMDs and PGDs
64-bit is that we only use half-page sized PTE tables in order to make
PMDs line up to 4MB, the hardware huge page size we use.
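
To make the arithmetic concrete (a back-of-the-envelope sketch, not
text from the original commit; it assumes the usual sparc64 8KB base
page and 8-byte table entries):

        /* half-page PTE table:  512 x 8KB  = 4MB per PMD entry
         * full page of PMDs:   1024 x 4MB  = 4GB per PGD entry
         * full page of PGDs:   1024 x 4GB  = 4TB total        = 2^42
         *
         * Switching to full-page (1024-entry) PTE tables makes each
         * PMD cover 8MB, so the total becomes 13 + 10 + 10 + 10 = 43
         * bits of virtual address.
         */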

So what we do here is we make huge pages 8MB, and fabricate them using
4MB hw TLB entries.

Facilitate this by providing a "REAL_HPAGE_SHIFT" which is used in
places that really need to operate on hardware 4MB pages.
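
In sketch form (the constants follow directly from the sizes described
above, though the exact definitions in the tree may differ):

        #define HPAGE_SHIFT      23  /* 8MB software huge page */
        #define REAL_HPAGE_SHIFT 22  /* 4MB hardware TLB entry */
        /* so each huge page is backed by
         * 1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT) == 2 hardware mappings.
         */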

Use full pages (1024 entries) for PTE tables, and adjust PMD_SHIFT,
PGD_SHIFT, and the build-time CPP test as needed.  Use a CPP test to
make sure REAL_HPAGE_SHIFT and the _PAGE_SZHUGE_* we use match up.
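
The build-time check could be as simple as the following sketch (the
test actually added by the patch may be written differently):

        #if REAL_HPAGE_SHIFT != 22
        #error REAL_HPAGE_SHIFT must match the 4MB _PAGE_SZHUGE_* TLB entry size
        #endif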

This makes the pgtable cache completely unused, so remove the code
managing it and the state used for it in mm_context_t.  Now fewer
spinlocks are taken in the page table allocation path.

The technique we use to fabricate the 8MB pages is to transfer bit 22
from the missing virtual address into the PTE's physical address field.
That takes care of the transparent huge pages case.
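
A minimal C sketch of the idea (the real work is done in the assembly
TSB/TLB miss handlers; the function name here is purely illustrative):

        /* Pick the correct 4MB half of an 8MB software huge page.  The
         * huge page's physical address is 8MB aligned, so bit 22 of the
         * PTE's PA field is free to carry bit 22 of the faulting address.
         */
        static unsigned long fabricate_4mb_tte(unsigned long pte_val,
                                               unsigned long fault_vaddr)
        {
                return pte_val | (fault_vaddr & (1UL << 22));
        }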

For hugetlb, we fill things in at the PTE level and that code already
puts the sub-huge-page physical bits into the PTEs, based upon the
offset, so there is nothing special we need to do.  It all just works
out.
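
A concrete example with made-up addresses: take an 8MB huge page at
physical address 0x10000000 mapped at virtual address 0x08000000.

        /* The hugetlb code writes one PTE per 8KB slot, bumping the
         * physical address as it goes, so the PTE covering VA 0x08400000
         * already holds PA 0x10400000 -- bit 22 of the physical address
         * comes out right with no extra work.
         */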

So, a small amount of complexity in the THP case, but this code is
about to get much simpler when we move to 64-bit PMDs, as we can move
away from the fancy 32-bit huge PMD encoding and just put a real PTE
value in there.

With bug fixes and help from Bob Picco.

Signed-off-by: David S. Miller <davem@davemloft.net>
2013-11-12 15:22:34 -08:00

#ifndef __MMU_H
#define __MMU_H
#include <linux/const.h>
#include <asm/page.h>
#include <asm/hypervisor.h>
#define CTX_NR_BITS 13
#define TAG_CONTEXT_BITS ((_AC(1,UL) << CTX_NR_BITS) - _AC(1,UL))
/* UltraSPARC-III+ and later have a feature whereby you can
 * select what page size the various Data-TLB instances in the
 * chip will handle.  In order to gracefully support this, we put
 * the version field in a spot outside of the areas of the context
 * register where this parameter is specified.
 */
#define CTX_VERSION_SHIFT 22
#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
#define CTX_PGSZ_8KB _AC(0x0,UL)
#define CTX_PGSZ_64KB _AC(0x1,UL)
#define CTX_PGSZ_512KB _AC(0x2,UL)
#define CTX_PGSZ_4MB _AC(0x3,UL)
#define CTX_PGSZ_BITS _AC(0x7,UL)
#define CTX_PGSZ0_NUC_SHIFT 61
#define CTX_PGSZ1_NUC_SHIFT 58
#define CTX_PGSZ0_SHIFT 16
#define CTX_PGSZ1_SHIFT 19
#define CTX_PGSZ_MASK ((CTX_PGSZ_BITS << CTX_PGSZ0_SHIFT) | \
                       (CTX_PGSZ_BITS << CTX_PGSZ1_SHIFT))
#define CTX_PGSZ_BASE CTX_PGSZ_8KB
#define CTX_PGSZ_HUGE CTX_PGSZ_4MB
#define CTX_PGSZ_KERN CTX_PGSZ_4MB
/* Thus, when running on UltraSPARC-III+ and later, we use the following
 * PRIMARY_CONTEXT register values for the kernel context.
 */
#define CTX_CHEETAH_PLUS_NUC \
        ((CTX_PGSZ_KERN << CTX_PGSZ0_NUC_SHIFT) | \
         (CTX_PGSZ_BASE << CTX_PGSZ1_NUC_SHIFT))
#define CTX_CHEETAH_PLUS_CTX0 \
        ((CTX_PGSZ_KERN << CTX_PGSZ0_SHIFT) | \
         (CTX_PGSZ_BASE << CTX_PGSZ1_SHIFT))
/* If you want "the TLB context number" use CTX_NR_MASK.  If you
 * want "the bits I program into the context registers" use
 * CTX_HW_MASK.
 */
#define CTX_NR_MASK TAG_CONTEXT_BITS
#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
#define CTX_VALID(__ctx) \
        (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
#define CTX_NRBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_NR_MASK)
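/* Usage sketch (not part of the original header): on a context switch
 * the kernel programs CTX_HWBITS(mm->context) into the MMU context
 * register, so the hardware sees both the context number and the page
 * size fields, while CTX_NRBITS(mm->context) is just "the context
 * number" you would report in diagnostics.
 */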
#ifndef __ASSEMBLY__
#define TSB_ENTRY_ALIGNMENT 16
struct tsb {
        unsigned long tag;
        unsigned long pte;
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
extern void tsb_flush(unsigned long ent, unsigned long tag);
extern void tsb_init(struct tsb *tsb, unsigned long size);
struct tsb_config {
        struct tsb    *tsb;           /* the TSB itself */
        unsigned long tsb_rss_limit;  /* grow the TSB once RSS passes this */
        unsigned long tsb_nentries;   /* current number of entries in the TSB */
        unsigned long tsb_reg_val;    /* encoded TSB base/size register value */
        unsigned long tsb_map_vaddr;  /* virtual address the TSB is mapped at */
        unsigned long tsb_map_pte;    /* PTE used to establish that mapping */
};
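/* Illustrative note (not in the original header): MM_TSB_BASE indexes
 * the TSB used for normal 8KB translations, while MM_TSB_HUGE, when
 * configured, indexes a second TSB holding the 4MB hardware entries
 * that back the (now 8MB) huge pages.
 */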
#define MM_TSB_BASE 0
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
#define MM_TSB_HUGE 1
#define MM_NUM_TSBS 2
#else
#define MM_NUM_TSBS 1
#endif
typedef struct {
        spinlock_t    lock;             /* protects this context */
        unsigned long sparc64_ctx_val;  /* context register value, see CTX_* above */
        unsigned long huge_pte_count;   /* number of huge mappings in this mm */
        struct tsb_config   tsb_block[MM_NUM_TSBS];
        struct hv_tsb_descr tsb_descr[MM_NUM_TSBS]; /* sun4v TSB descriptors */
} mm_context_t;
#endif /* !__ASSEMBLY__ */
#define TSB_CONFIG_TSB 0x00
#define TSB_CONFIG_RSS_LIMIT 0x08
#define TSB_CONFIG_NENTRIES 0x10
#define TSB_CONFIG_REG_VAL 0x18
#define TSB_CONFIG_MAP_VADDR 0x20
#define TSB_CONFIG_MAP_PTE 0x28
#endif /* __MMU_H */
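
The TSB_CONFIG_* values are byte offsets into struct tsb_config, laid
out for use by assembly code.  A compile-time cross-check along these
lines would keep them in sync (not part of the file;
tsb_config_offsets_check() is a hypothetical helper):

        #include <linux/bug.h>
        #include <linux/stddef.h>

        static inline void tsb_config_offsets_check(void)
        {
                BUILD_BUG_ON(offsetof(struct tsb_config, tsb)           != TSB_CONFIG_TSB);
                BUILD_BUG_ON(offsetof(struct tsb_config, tsb_rss_limit) != TSB_CONFIG_RSS_LIMIT);
                BUILD_BUG_ON(offsetof(struct tsb_config, tsb_nentries)  != TSB_CONFIG_NENTRIES);
                BUILD_BUG_ON(offsetof(struct tsb_config, tsb_reg_val)   != TSB_CONFIG_REG_VAL);
                BUILD_BUG_ON(offsetof(struct tsb_config, tsb_map_vaddr) != TSB_CONFIG_MAP_VADDR);
                BUILD_BUG_ON(offsetof(struct tsb_config, tsb_map_pte)   != TSB_CONFIG_MAP_PTE);
        }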