powerpc/mm/hash: Add hpte_get_old_v and use that instead of opencoding
No functional change.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit a833280b4a
parent 1531cff44b
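The change is mechanical: every call site that read hptep->v and, on ISA 3.0 (POWER9) CPUs, converted the value back to the pre-POWER9 layout now calls the new hpte_get_old_v() helper instead. Below is a minimal, self-contained sketch of that before/after pattern; the struct layout, byte swap, feature check, and conversion bodies are stand-in stubs for illustration, not the kernel's real definitions.

/*
 * Standalone sketch of the pattern this commit consolidates.
 * The stub bodies below are NOT the kernel's implementations;
 * only the names match the code in the diff.
 */
#include <stdint.h>
#include <stdio.h>

struct hash_pte { uint64_t v, r; };                     /* stub: big-endian in the real HPT */

static uint64_t be64_to_cpu(uint64_t x) { return x; }   /* stub: identity, as on a BE host */
static int cpu_has_feature(int f) { return f; }         /* stub: pretend an ISA 3.0 CPU */
#define CPU_FTR_ARCH_300 1

/* stub: fold the POWER9 segment-size bits from 'r' back into the old 'v' layout */
static uint64_t hpte_new_to_old_v(uint64_t v, uint64_t r) { return v | (r & 0x3ULL); }

/* The new helper: one place for the "read v, convert if ISA 3.0" sequence. */
static uint64_t hpte_get_old_v(struct hash_pte *hptep)
{
	uint64_t hpte_v = be64_to_cpu(hptep->v);

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}

int main(void)
{
	struct hash_pte pte = { .v = 0x10, .r = 0x2 };

	/* Before the commit, every caller open-coded these three lines: */
	uint64_t open_coded = be64_to_cpu(pte.v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		open_coded = hpte_new_to_old_v(open_coded, be64_to_cpu(pte.r));

	/* After the commit, they all call the helper instead: */
	uint64_t via_helper = hpte_get_old_v(&pte);

	printf("open-coded %#llx, helper %#llx\n",
	       (unsigned long long)open_coded, (unsigned long long)via_helper);
	return 0;
}

Compiled with any C99 compiler, both paths print the same value, which is the "no functional change" the commit message claims.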
@@ -364,6 +364,16 @@ static inline unsigned long hpte_new_to_old_r(unsigned long r)
 	return r & ~HPTE_R_3_0_SSIZE_MASK;
 }
 
+static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
+{
+	unsigned long hpte_v;
+
+	hpte_v = be64_to_cpu(hptep->v);
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+	return hpte_v;
+}
+
 /*
  * This function sets the AVPN and L fields of the HPTE appropriately
  * using the base page size and actual page size.
@@ -423,9 +423,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
 		vpn, want_v & HPTE_V_AVPN, slot, newpp);
 
-	hpte_v = be64_to_cpu(hptep->v);
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+	hpte_v = hpte_get_old_v(hptep);
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
 	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -439,9 +437,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	} else {
 		native_lock_hpte(hptep);
 		/* recheck with locks held */
-		hpte_v = be64_to_cpu(hptep->v);
-		if (cpu_has_feature(CPU_FTR_ARCH_300))
-			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+		hpte_v = hpte_get_old_v(hptep);
 		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
 			     !(hpte_v & HPTE_V_VALID))) {
 			ret = -1;
@@ -481,11 +477,9 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 	/* Bolted mappings are only ever in the primary group */
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
-		hptep = htab_address + slot;
-		hpte_v = be64_to_cpu(hptep->v);
-		if (cpu_has_feature(CPU_FTR_ARCH_300))
-			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 
+		hptep = htab_address + slot;
+		hpte_v = hpte_get_old_v(hptep);
 		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
 			/* HPTE matches */
 			return slot;
@@ -575,9 +569,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 
 	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
 	native_lock_hpte(hptep);
-	hpte_v = be64_to_cpu(hptep->v);
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+	hpte_v = hpte_get_old_v(hptep);
 
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -635,9 +627,7 @@ static void native_hugepage_invalidate(unsigned long vsid,
 		hptep = htab_address + slot;
 		want_v = hpte_encode_avpn(vpn, psize, ssize);
 		native_lock_hpte(hptep);
-		hpte_v = be64_to_cpu(hptep->v);
-		if (cpu_has_feature(CPU_FTR_ARCH_300))
-			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
+		hpte_v = hpte_get_old_v(hptep);
 
 		/* Even if we miss, we need to invalidate the TLB */
 		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -813,10 +803,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 			hptep = htab_address + slot;
 			want_v = hpte_encode_avpn(vpn, psize, ssize);
 			native_lock_hpte(hptep);
-			hpte_v = be64_to_cpu(hptep->v);
-			if (cpu_has_feature(CPU_FTR_ARCH_300))
-				hpte_v = hpte_new_to_old_v(hpte_v,
-						be64_to_cpu(hptep->r));
+			hpte_v = hpte_get_old_v(hptep);
 			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
 			    !(hpte_v & HPTE_V_VALID))
 				native_unlock_hpte(hptep);