forked from Minki/linux
x86: change write msr functions interface
This patch changes the native_write_msr() and friends interface to explicitly take 2 32-bit registers instead of a 64-bit value. The change will ease the merge with 64-bit code. As the 64-bit value will be passed as two registers anyway in i386, the PVOP_CALL interface has to account for that and use low/high parameters. It would force the x86_64 version to be different. The change does not make i386 generated code less efficient. As said above, it would get the values from two registers anyway. Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
b8d1fae7db
commit
c9dcda5ce4
@ -63,13 +63,14 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void native_write_msr(unsigned int msr, unsigned long long val)
|
||||
static inline void native_write_msr(unsigned int msr,
|
||||
unsigned low, unsigned high)
|
||||
{
|
||||
asm volatile("wrmsr" : : "c" (msr), "A"(val));
|
||||
asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high));
|
||||
}
|
||||
|
||||
static inline int native_write_msr_safe(unsigned int msr,
|
||||
unsigned long long val)
|
||||
unsigned low, unsigned high)
|
||||
{
|
||||
int err;
|
||||
asm volatile("2: wrmsr ; xorl %0,%0\n"
|
||||
@ -82,7 +83,7 @@ static inline int native_write_msr_safe(unsigned int msr,
|
||||
" .long 2b,3b\n\t"
|
||||
".previous"
|
||||
: "=a" (err)
|
||||
: "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
|
||||
: "c" (msr), "0" (low), "d" (high),
|
||||
"i" (-EFAULT));
|
||||
return err;
|
||||
}
|
||||
@ -118,20 +119,20 @@ static inline unsigned long long native_read_pmc(int counter)
|
||||
(val2) = (u32)(__val >> 32); \
|
||||
} while(0)
|
||||
|
||||
/*
 * Generic wrmsr() wrapper: forwards the low/high halves straight to
 * native_write_msr() now that it takes two 32-bit arguments.
 */
static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
{
	native_write_msr(msr, low, high);
}
|
||||
|
||||
/* Read a 64-bit MSR value into a 64-bit lvalue. */
#define rdmsrl(msr, val)						\
	((val) = native_read_msr(msr))

/*
 * Write a 64-bit value: split into low/high 32-bit halves for
 * native_write_msr().  The argument must be fully parenthesized —
 * "(u32)val" and "(val >> 32)" would expand wrongly for e.g.
 * wrmsrl(m, a | b) because cast and shift bind tighter than '|'.
 */
#define wrmsrl(msr, val)						\
	native_write_msr((msr), (u32)(val), (u32)((u64)(val) >> 32))
|
||||
|
||||
/*
 * wrmsr with exception handling: returns 0 on success, -EFAULT if
 * the write faulted.  Thin wrapper over native_write_msr_safe().
 */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
{
	return native_write_msr_safe(msr, low, high);
}
|
||||
|
||||
/* rdmsr with exception handling */
|
||||
|
@ -115,7 +115,7 @@ struct pv_cpu_ops {
|
||||
/* MSR, PMC and TSR operations.
|
||||
err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
|
||||
u64 (*read_msr)(unsigned int msr, int *err);
|
||||
int (*write_msr)(unsigned int msr, u64 val);
|
||||
int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
|
||||
|
||||
u64 (*read_tsc)(void);
|
||||
u64 (*read_pmc)(int counter);
|
||||
|
Loading…
Reference in New Issue
Block a user