[PATCH] ppc64: remove another fixed address constraint

Presently the LparMap, one of the structures the kernel shares with the
legacy iSeries hypervisor, has a fixed offset address in head.S.  This patch
changes this so that the LparMap is a normally initialized structure with no
fixed address.  This allows us to use macros to compute some of the values
in the structure, which wasn't possible before because the assembler always
uses a signed modulo (%), which gives the wrong answers for the computations
in question.
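
To see why the assembler's signed % matters here (an illustrative host-side
sketch, not part of the patch; the multiplier and modulus are assumed to be
this kernel's VSID_SCRAMBLE constants, 200730139 and 2^36 - 1):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t multiplier = 200730139ULL;	/* assumed 28-bit prime */
	uint64_t modulus    = (1ULL << 36) - 1;	/* assumed VSID_MODULUS */
	uint64_t esid       = 0xC00000000ULL;	/* KERNELBASE >> SID_SHIFT */

	uint64_t product = esid * multiplier;	/* larger than 2^63 */

	/* Unsigned modulo, as C evaluates KERNEL_VSID(): prints 0x408f92c94,
	 * the VSID value head.S previously hardcoded. */
	printf("unsigned %%: 0x%llx\n",
	       (unsigned long long)(product % modulus));

	/* Signed modulo, as the assembler's % would evaluate the same
	 * expression: the product is interpreted as negative, so the
	 * result comes out wrong. */
	printf("signed   %%: %lld\n",
	       (long long)((int64_t)product % (int64_t)modulus));
	return 0;
}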

Unfortunately, a gcc bug means that doing this requires another structure
(hvReleaseData) to be initialized in asm instead of C, but on the whole the
result is cleaner than before.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 488f84994c
parent 533f08172e
Author: David Gibson, 2005-07-27 11:44:21 -07:00
Committed by: Linus Torvalds

5 changed files with 109 additions and 49 deletions

File: arch/ppc64/kernel/LparData.c

@@ -33,17 +33,36 @@
* the hypervisor and Linux.
*/
/*
* WARNING - magic here
*
* Ok, this is a horrid hack below, but marginally better than the
* alternatives. What we really want is just to initialize
* hvReleaseData in C as in the #if 0 section here. However, gcc
* refuses to believe that (u32)&x is a constant expression, so will
* not allow the xMsNucDataOffset field to be properly initialized.
* So, we declare hvReleaseData in inline asm instead. We use inline
* asm, rather than a .S file, because the assembler won't generate
* the necessary relocation for the LparMap either, unless that symbol
* is declared in the same source file. Finally, we put the asm in a
* dummy, attribute-used function, instead of at file scope, because
* file scope asms don't allow constraints. We want to use the "i"
* constraints to put sizeof() and offsetof() expressions in there,
* because including asm/offsets.h in C code then stringifying causes
* all manner of warnings.
*/
#if 0
struct HvReleaseData hvReleaseData = {
.xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
.xSize = sizeof(struct HvReleaseData),
.xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
.xSlicNacaAddr = &naca, /* 64-bit Naca address */
.xMsNucDataOffset = 0x4800, /* offset of LparMap within loadarea (see head.S) */
.xTagsMode = 1, /* tags inactive */
.xAddressSize = 0, /* 64 bit */
.xNoSharedProcs = 0, /* shared processors */
.xNoHMT = 0, /* HMT allowed */
.xRsvd2 = 6, /* TEMP: This allows non-GA driver */
.xMsNucDataOffset = (u32)((unsigned long)&xLparMap - KERNELBASE),
.xFlags = HVREL_TAGSINACTIVE /* tags inactive */
/* 64 bit */
/* shared processors */
/* HMT allowed */
| 6, /* TEMP: This allows non-GA driver */
.xVrmIndex = 4, /* We are v5r2m0 */
.xMinSupportedPlicVrmIndex = 3, /* v5r1m0 */
.xMinCompatablePlicVrmIndex = 3, /* v5r1m0 */
@@ -51,6 +70,63 @@ struct HvReleaseData hvReleaseData = {
0xa7, 0x40, 0xf2, 0x4b,
0xf4, 0x4b, 0xf6, 0xf4 },
};
#endif
extern struct HvReleaseData hvReleaseData;
static void __attribute_used__ hvReleaseData_wrapper(void)
{
/* This doesn't appear to need any alignment (even 4 byte) */
asm volatile (
" lparMapPhys = xLparMap - %3\n"
" .data\n"
" .globl hvReleaseData\n"
"hvReleaseData:\n"
" .long 0xc8a5d9c4\n" /* xDesc */
/* "HvRD" in ebcdic */
" .short %0\n" /* xSize */
" .short %1\n" /* xVpdAreasPtrOffset */
" .llong naca\n" /* xSlicNacaAddr */
" .long lparMapPhys\n" /* xMsNucDataOffset */
" .long 0\n" /* xRsvd1 */
" .short %2\n" /* xFlags */
" .short 4\n" /* xVrmIndex - v5r2m0 */
" .short 3\n" /* xMinSupportedPlicVrmIndex - v5r1m0 */
" .short 3\n" /* xMinCompatablePlicVrmIndex - v5r1m0 */
" .long 0xd38995a4\n" /* xVrmName */
" .long 0xa740f24b\n" /* "Linux 2.4.64" ebcdic */
" .long 0xf44bf6f4\n"
" . = hvReleaseData + %0\n"
" .previous\n"
: : "i"(sizeof(hvReleaseData)),
"i"(offsetof(struct naca_struct, xItVpdAreas)),
"i"(HVREL_TAGSINACTIVE /* tags inactive, 64 bit, */
/* shared processors, HMT allowed */
| 6), /* TEMP: This allows non-GA drivers */
"i"(KERNELBASE)
);
}
struct LparMap __attribute__((aligned (16))) xLparMap = {
.xNumberEsids = HvEsidsToMap,
.xNumberRanges = HvRangesToMap,
.xSegmentTableOffs = STAB0_PAGE,
.xEsids = {
{ .xKernelEsid = GET_ESID(KERNELBASE),
.xKernelVsid = KERNEL_VSID(KERNELBASE), },
{ .xKernelEsid = GET_ESID(VMALLOCBASE),
.xKernelVsid = KERNEL_VSID(VMALLOCBASE), },
},
.xRanges = {
{ .xPages = HvPagesToMap,
.xOffset = 0,
.xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - PAGE_SHIFT),
},
},
};
extern void system_reset_iSeries(void);
extern void machine_check_iSeries(void);
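
The gcc limitation that the WARNING comment above works around can be
reproduced in isolation (a sketch assuming a gcc of this era; 'bad_offset'
is a made-up name for illustration, not code from the patch):

extern struct LparMap xLparMap;

/* At file scope, gcc refuses to treat (u32)&x as a constant expression and
 * rejects the initializer (e.g. "initializer element is not constant"):
 * the address is only a link-time constant, and narrowing it to u32 is not
 * something gcc will accept as a static initializer.  This is why
 * hvReleaseData's xMsNucDataOffset has to be filled in from inline asm. */
static u32 bad_offset = (u32)((unsigned long)&xLparMap - KERNELBASE);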

File: arch/ppc64/kernel/head.S

@@ -522,36 +522,9 @@
#ifdef CONFIG_PPC_ISERIES
.globl naca
naca:
.llong itVpdAreas
/*
* The iSeries LPAR map is at this fixed address
* so that the HvReleaseData structure can address
* it with a 32-bit offset.
*
* The VSID values below are dependent on the
* VSID generation algorithm. See include/asm/mmu_context.h.
*/
. = 0x4800
.llong 2 /* # ESIDs to be mapped by hypervisor */
.llong 1 /* # memory ranges to be mapped by hypervisor */
.llong STAB0_PAGE /* Page # of segment table within load area */
.llong 0 /* Reserved */
.llong 0 /* Reserved */
.llong 0 /* Reserved */
.llong 0 /* Reserved */
.llong 0 /* Reserved */
.llong (KERNELBASE>>SID_SHIFT)
.llong 0x408f92c94 /* KERNELBASE VSID */
/* We have to list the bolted VMALLOC segment here, too, so that it
* will be restored on shared processor switch */
.llong (VMALLOCBASE>>SID_SHIFT)
.llong 0xf09b89af5 /* VMALLOCBASE VSID */
.llong 8192 /* # pages to map (32 MB) */
.llong 0 /* Offset from start of loadarea to start of map */
.llong 0x408f92c940000 /* VPN of first page to map */
.llong itVpdAreas
.llong 0 /* xRamDisk */
.llong 0 /* xRamDiskSize */
. = 0x6100

File: include/asm-ppc64/iSeries/HvReleaseData.h

@@ -39,6 +39,11 @@
* know that this PLIC does not support running an OS "that old".
*/
#define HVREL_TAGSINACTIVE 0x8000
#define HVREL_32BIT 0x4000
#define HVREL_NOSHAREDPROCS 0x2000
#define HVREL_NOHMT 0x1000
struct HvReleaseData {
u32 xDesc; /* Descriptor "HvRD" ebcdic x00-x03 */
u16 xSize; /* Size of this control block x04-x05 */
@@ -46,11 +51,7 @@ struct HvReleaseData {
struct naca_struct *xSlicNacaAddr; /* Virt addr of SLIC NACA x08-x0F */
u32 xMsNucDataOffset; /* Offset of Linux Mapping Data x10-x13 */
u32 xRsvd1; /* Reserved x14-x17 */
u16 xTagsMode:1; /* 0 == tags active, 1 == tags inactive */
u16 xAddressSize:1; /* 0 == 64-bit, 1 == 32-bit */
u16 xNoSharedProcs:1; /* 0 == shared procs, 1 == no shared */
u16 xNoHMT:1; /* 0 == allow HMT, 1 == no HMT */
u16 xRsvd2:12; /* Reserved x18-x19 */
u16 xFlags;
u16 xVrmIndex; /* VRM Index of OS image x1A-x1B */
u16 xMinSupportedPlicVrmIndex; /* Min PLIC level (soft) x1C-x1D */
u16 xMinCompatablePlicVrmIndex; /* Min PLIC levelP (hard) x1E-x1F */

File: include/asm-ppc64/iSeries/LparMap.h

@@ -49,19 +49,26 @@
* entry to map the Esid to the Vsid.
*/
#define HvEsidsToMap 2
#define HvRangesToMap 1
/* Hypervisor initially maps 32MB of the load area */
#define HvPagesToMap 8192
struct LparMap {
u64 xNumberEsids; // Number of ESID/VSID pairs (1)
u64 xNumberRanges; // Number of VA ranges to map (1)
u64 xSegmentTableOffs; // Page number within load area of seg table (0)
u64 xNumberEsids; // Number of ESID/VSID pairs
u64 xNumberRanges; // Number of VA ranges to map
u64 xSegmentTableOffs; // Page number within load area of seg table
u64 xRsvd[5];
u64 xKernelEsid; // Esid used to map kernel load (0x0C00000000)
u64 xKernelVsid; // Vsid used to map kernel load (0x0C00000000)
u64 xPages; // Number of pages to be mapped (8192)
u64 xOffset; // Offset from start of load area (0)
u64 xVPN; // Virtual Page Number (0x000C000000000000)
struct {
u64 xKernelEsid; // Esid used to map kernel load
u64 xKernelVsid; // Vsid used to map kernel load
} xEsids[HvEsidsToMap];
struct {
u64 xPages; // Number of pages to be mapped
u64 xOffset; // Offset from start of load area
u64 xVPN; // Virtual Page Number
} xRanges[HvRangesToMap];
};
extern struct LparMap xLparMap;

File: include/asm-ppc64/mmu.h

@@ -338,6 +338,9 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
| (ea >> SID_SHIFT));
}
#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
#endif /* __ASSEMBLY */
#endif /* _PPC64_MMU_H_ */
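
A quick host-side check (a sketch, not part of the patch; KERNELBASE,
VMALLOCBASE and the VSID constants are assumed to match this kernel's
headers) that the new KERNEL_VSID() macro reproduces the values the old
head.S table hardcoded:

#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT	28
#define PAGE_SHIFT	12
#define KERNELBASE	0xC000000000000000ULL
#define VMALLOCBASE	0xD000000000000000ULL

#define VSID_MULTIPLIER	200730139ULL		/* assumed 28-bit prime */
#define VSID_MODULUS	((1ULL << 36) - 1)

#define GET_ESID(ea)		((ea) >> SID_SHIFT)
#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

int main(void)
{
	/* Expect 0x408f92c94 and 0xf09b89af5, the VSIDs head.S carried by
	 * hand, and 0x408f92c940000 for the xVPN field of the LparMap. */
	printf("KERNELBASE  VSID: 0x%llx\n",
	       (unsigned long long)KERNEL_VSID(KERNELBASE));
	printf("VMALLOCBASE VSID: 0x%llx\n",
	       (unsigned long long)KERNEL_VSID(VMALLOCBASE));
	printf("KERNELBASE  VPN:  0x%llx\n",
	       (unsigned long long)(KERNEL_VSID(KERNELBASE)
				    << (SID_SHIFT - PAGE_SHIFT)));
	return 0;
}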