Merge branch 'sh/stable-updates'

Paul Mundt 2009-11-09 10:55:36 +09:00
commit 76d2318020
52 changed files with 776 additions and 312 deletions

View File

@@ -11,6 +11,9 @@ config M32R
  select HAVE_IDE
  select HAVE_OPROFILE
  select INIT_ALL_POSSIBLE
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_LZMA
  config SBUS
  bool

View File

@@ -4,8 +4,8 @@
  # create a compressed vmlinux image from the original vmlinux
  #
- targets := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o \
-            piggy.o vmlinux.lds
+ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
+            vmlinux.bin.lzma head.o misc.o piggy.o vmlinux.lds
  OBJECTS = $(obj)/head.o $(obj)/misc.o
@@ -27,6 +27,12 @@ $(obj)/vmlinux.bin: vmlinux FORCE
  $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
  	$(call if_changed,gzip)
+ $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+ 	$(call if_changed,bzip2)
+ $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ 	$(call if_changed,lzma)
  CFLAGS_misc.o += -fpic
  ifdef CONFIG_MMU
@@ -37,5 +43,9 @@ endif
  OBJCOPYFLAGS += -R .empty_zero_page
- $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+ suffix_$(CONFIG_KERNEL_GZIP) = gz
+ suffix_$(CONFIG_KERNEL_BZIP2) = bz2
+ suffix_$(CONFIG_KERNEL_LZMA) = lzma
+ $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
  	$(call if_changed,ld)

View File

@@ -9,140 +9,49 @@
   * Adapted for SH by Stuart Menefy, Aug 1999
   *
   * 2003-02-12: Support M32R by Takeo Takahashi
   *             This is based on arch/sh/boot/compressed/misc.c.
   */
+ #include <linux/string.h>
  /*
   * gzip declarations
   */
- #define OF(args) args
  #define STATIC static
  #undef memset
  #undef memcpy
  #define memzero(s, n) memset ((s), 0, (n))
- typedef unsigned char uch;
- typedef unsigned short ush;
- typedef unsigned long ulg;
- #define WSIZE 0x8000 /* Window size must be at least 32k, */
-                      /* and a power of two */
- static uch *inbuf;           /* input buffer */
- static uch window[WSIZE];    /* Sliding window buffer */
- static unsigned insize = 0;  /* valid bytes in inbuf */
- static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
- static unsigned outcnt = 0;  /* bytes in output buffer */
- /* gzip flag byte */
- #define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
- #define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
- #define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
- #define ORIG_NAME    0x08 /* bit 3 set: original file name present */
- #define COMMENT      0x10 /* bit 4 set: file comment present */
- #define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
- #define RESERVED     0xC0 /* bit 6,7: reserved */
- #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
- /* Diagnostic functions */
- #ifdef DEBUG
- # define Assert(cond,msg) {if(!(cond)) error(msg);}
- # define Trace(x) fprintf x
- # define Tracev(x) {if (verbose) fprintf x ;}
- # define Tracevv(x) {if (verbose>1) fprintf x ;}
- # define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
- # define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
- #else
- # define Assert(cond,msg)
- # define Trace(x)
- # define Tracev(x)
- # define Tracevv(x)
- # define Tracec(c,x)
- # define Tracecv(c,x)
- #endif
- static int fill_inbuf(void);
- static void flush_window(void);
  static void error(char *m);
- static unsigned char *input_data;
- static int input_len;
- static long bytes_out = 0;
- static uch *output_data;
- static unsigned long output_ptr = 0;
  #include "m32r_sio.c"
  static unsigned long free_mem_ptr;
  static unsigned long free_mem_end_ptr;
- #define HEAP_SIZE 0x10000
- #include "../../../../lib/inflate.c"
- void* memset(void* s, int c, size_t n)
+ #ifdef CONFIG_KERNEL_BZIP2
+ static void *memset(void *s, int c, size_t n)
  {
- 	int i;
- 	char *ss = (char*)s;
- 	for (i=0;i<n;i++) ss[i] = c;
+ 	char *ss = s;
+ 	while (n--)
+ 		*ss++ = c;
  	return s;
  }
+ #endif
- void* memcpy(void* __dest, __const void* __src,
- 			    size_t __n)
- {
- 	int i;
- 	char *d = (char *)__dest, *s = (char *)__src;
- 	for (i=0;i<__n;i++) d[i] = s[i];
- 	return __dest;
- }
+ #ifdef CONFIG_KERNEL_GZIP
+ #define BOOT_HEAP_SIZE 0x10000
+ #include "../../../../lib/decompress_inflate.c"
+ #endif
- /* ===========================================================================
-  * Fill the input buffer. This is called only when the buffer is empty
-  * and at least one byte is really needed.
-  */
- static int fill_inbuf(void)
- {
- 	if (insize != 0) {
- 		error("ran out of input data");
- 	}
- 	inbuf = input_data;
- 	insize = input_len;
- 	inptr = 1;
- 	return inbuf[0];
- }
+ #ifdef CONFIG_KERNEL_BZIP2
+ #define BOOT_HEAP_SIZE 0x400000
+ #include "../../../../lib/decompress_bunzip2.c"
+ #endif
- /* ===========================================================================
-  * Write the output window window[0..outcnt-1] and update crc and bytes_out.
-  * (Used for the decompressed data only.)
-  */
- static void flush_window(void)
- {
- 	ulg c = crc; /* temporary variable */
- 	unsigned n;
- 	uch *in, *out, ch;
- 	in = window;
- 	out = &output_data[output_ptr];
- 	for (n = 0; n < outcnt; n++) {
- 		ch = *out++ = *in++;
- 		c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
- 	}
- 	crc = c;
- 	bytes_out += (ulg)outcnt;
- 	output_ptr += (ulg)outcnt;
- 	outcnt = 0;
- }
+ #ifdef CONFIG_KERNEL_LZMA
+ #define BOOT_HEAP_SIZE 0x10000
+ #include "../../../../lib/decompress_unlzma.c"
+ #endif
  static void error(char *x)
  {
@@ -153,20 +62,20 @@ static void error(char *x)
  	while(1); /* Halt */
  }
- /* return decompressed size */
  void
  decompress_kernel(int mmu_on, unsigned char *zimage_data,
  		  unsigned int zimage_len, unsigned long heap)
  {
+ 	unsigned char *input_data = zimage_data;
+ 	int input_len = zimage_len;
+ 	unsigned char *output_data;
  	output_data = (unsigned char *)CONFIG_MEMORY_START + 0x2000
  		+ (mmu_on ? 0x80000000 : 0);
  	free_mem_ptr = heap;
- 	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+ 	free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
- 	input_data = zimage_data;
- 	input_len = zimage_len;
- 	makecrc();
- 	puts("Uncompressing Linux... ");
- 	gunzip();
- 	puts("Ok, booting the kernel.\n");
+ 	puts("\nDecompressing Linux... ");
+ 	decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
+ 	puts("done.\nBooting the kernel.\n");
  }

View File

@@ -806,7 +806,7 @@ unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
  if (mask & ~physids_coerce(phys_cpu_present_map))
  	BUG();
- if (ipi_num >= NR_IPIS)
+ if (ipi_num >= NR_IPIS || ipi_num < 0)
  	BUG();
  mask <<= IPI_SHIFT;

View File

@@ -75,7 +75,7 @@ u32 arch_gettimeoffset(void)
  	count = 0;
  count = (latch - count) * TICK_SIZE;
- elapsed_time = (count + latch / 2) / latch;
+ elapsed_time = DIV_ROUND_CLOSEST(count, latch);
  /* NOTE: LATCH is equal to the "interval" value (= reload count). */
  #else /* CONFIG_SMP */
@@ -93,7 +93,7 @@ u32 arch_gettimeoffset(void)
  p_count = count;
  count = (latch - count) * TICK_SIZE;
- elapsed_time = (count + latch / 2) / latch;
+ elapsed_time = DIV_ROUND_CLOSEST(count, latch);
  /* NOTE: LATCH is equal to the "interval" value (= reload count). */
  #endif /* CONFIG_SMP */
  #elif defined(CONFIG_CHIP_M32310)
@@ -211,7 +211,7 @@ void __init time_init(void)
  bus_clock = boot_cpu_data.bus_clock;
  divide = boot_cpu_data.timer_divide;
- latch = (bus_clock/divide + HZ / 2) / HZ;
+ latch = DIV_ROUND_CLOSEST(bus_clock/divide, HZ);
  printk("Timer start : latch = %ld\n", latch);

View File

@@ -42,6 +42,8 @@ SECTIONS
  _etext = .; /* End of text section */
  EXCEPTION_TABLE(16)
+ NOTES
  RODATA
  RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
  _edata = .; /* End of data section */

View File

@@ -414,6 +414,10 @@ config ARCH_SPARSEMEM_DEFAULT
  config ARCH_POPULATES_NODE_MAP
  def_bool y
+ config SYS_SUPPORTS_HUGETLBFS
+ def_bool y
+ depends on PPC_BOOK3S_64
  source "mm/Kconfig"
  config ARCH_MEMORY_PROBE

View File

@@ -777,7 +777,7 @@ int update_persistent_clock(struct timespec now)
  return ppc_md.set_rtc_time(&tm);
  }
- void read_persistent_clock(struct timespec *ts)
+ static void __read_persistent_clock(struct timespec *ts)
  {
  struct rtc_time tm;
  static int first = 1;
@@ -800,10 +800,23 @@ void read_persistent_clock(struct timespec *ts)
  	return;
  }
  ppc_md.get_rtc_time(&tm);
  ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
  		    tm.tm_hour, tm.tm_min, tm.tm_sec);
  }
+ void read_persistent_clock(struct timespec *ts)
+ {
+ 	__read_persistent_clock(ts);
+ 	/* Sanitize it in case real time clock is set below EPOCH */
+ 	if (ts->tv_sec < 0) {
+ 		ts->tv_sec = 0;
+ 		ts->tv_nsec = 0;
+ 	}
+ }
  /* clocksource code */
  static cycle_t rtc_read(struct clocksource *cs)
  {

View File

@@ -48,7 +48,11 @@ static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
  static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
  {
  /* type has to be known at build time for optimization */
+ /* The BUILD_BUG_ON below breaks in funny ways, commented out
+  * for now ... -BenH
  BUILD_BUG_ON(__builtin_constant_p(type));
+ */
  switch (type) {
  case EXT_INTR_EXITS:
  	vcpu->stat.ext_intr_exits++;

View File

@@ -25,8 +25,8 @@
  *     also clear mm->cpu_vm_mask bits when processes are migrated
  */
- #define DEBUG_MAP_CONSISTENCY
- #define DEBUG_CLAMP_LAST_CONTEXT   31
+ //#define DEBUG_MAP_CONSISTENCY
+ //#define DEBUG_CLAMP_LAST_CONTEXT   31
  //#define DEBUG_HARDER
  /* We don't use DEBUG because it tends to be compiled in always nowadays

View File

@@ -432,8 +432,6 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
  /* Read config space back so we can restore after reset */
  read_msi_msg(virq, &msg);
  entry->msg = msg;
- unmask_msi_irq(virq);
  }
  return 0;

View File

@@ -18,6 +18,7 @@
  #include <linux/init.h>
  #include <linux/radix-tree.h>
  #include <linux/cpu.h>
+ #include <linux/msi.h>
  #include <linux/of.h>
  #include <asm/firmware.h>
@@ -219,6 +220,14 @@ static void xics_unmask_irq(unsigned int virq)
  static unsigned int xics_startup(unsigned int virq)
  {
+ /*
+  * The generic MSI code returns with the interrupt disabled on the
+  * card, using the MSI mask bits. Firmware doesn't appear to unmask
+  * at that level, so we do it here by hand.
+  */
+ if (irq_to_desc(virq)->msi_desc)
+ 	unmask_msi_irq(virq);
  /* unmask it */
  xics_unmask_irq(virq);
  return 0;

View File

@@ -41,7 +41,7 @@ struct rw_semaphore {
  #endif
  #define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
  LIST_HEAD_INIT((name).wait_list) \
  __RWSEM_DEP_MAP_INIT(name) }

View File

@@ -567,7 +567,7 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
  * NOTE: the return address is guaranteed to be setup by the
  * time this function makes its first function call.
  */
- if (!pc && !prev)
+ if (!pc || !prev)
  	pc = (unsigned long)current_text_addr();
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER

View File

@@ -65,6 +65,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
  for (v = start; v < end; v += L1_CACHE_BYTES) {
  	unsigned long icacheaddr;
+ 	int j, n;
  	__ocbwb(v);
@@ -72,8 +73,10 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
  		cpu_data->icache.entry_mask);
  	/* Clear i-cache line valid-bit */
+ 	n = boot_cpu_data.icache.n_aliases;
  	for (i = 0; i < cpu_data->icache.ways; i++) {
- 		__raw_writel(0, icacheaddr);
+ 		for (j = 0; j < n; j++)
+ 			__raw_writel(0, icacheaddr + (j * PAGE_SIZE));
  		icacheaddr += cpu_data->icache.way_incr;
  	}
  }

View File

@@ -31,6 +31,7 @@ extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
  extern void amd_iommu_flush_all_domains(void);
  extern void amd_iommu_flush_all_devices(void);
  extern void amd_iommu_shutdown(void);
+ extern void amd_iommu_apply_erratum_63(u16 devid);
  #else
  static inline int amd_iommu_init(void) { return -ENODEV; }
  static inline void amd_iommu_detect(void) { }

View File

@@ -288,7 +288,7 @@ static inline void load_LDT(mm_context_t *pc)
  static inline unsigned long get_desc_base(const struct desc_struct *desc)
  {
- return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
+ return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
  }
  static inline void set_desc_base(struct desc_struct *desc, unsigned long base)

View File

@@ -1000,7 +1000,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
  #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
- #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
+ extern unsigned long KSTK_ESP(struct task_struct *task);
  #endif /* CONFIG_X86_64 */
  extern void start_thread(struct pt_regs *regs, unsigned long new_ip,

View File

@@ -143,7 +143,7 @@ extern unsigned long node_remap_size[];
  | 1*SD_BALANCE_FORK \
  | 0*SD_BALANCE_WAKE \
  | 1*SD_WAKE_AFFINE \
- | 1*SD_PREFER_LOCAL \
+ | 0*SD_PREFER_LOCAL \
  | 0*SD_SHARE_CPUPOWER \
  | 0*SD_POWERSAVINGS_BALANCE \
  | 0*SD_SHARE_PKG_RESOURCES \

View File

@@ -1220,6 +1220,8 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
  amd_iommu_dev_table[devid].data[1] = 0;
  amd_iommu_dev_table[devid].data[2] = 0;
+ amd_iommu_apply_erratum_63(devid);
  /* decrease reference counter */
  domain->dev_cnt -= 1;

View File

@@ -240,7 +240,7 @@ static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
  writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
  }
- static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
+ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
  {
  u32 ctrl;
@@ -519,6 +519,26 @@ static void set_dev_entry_bit(u16 devid, u8 bit)
  amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
  }
+ static int get_dev_entry_bit(u16 devid, u8 bit)
+ {
+ 	int i = (bit >> 5) & 0x07;
+ 	int _bit = bit & 0x1f;
+ 	return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+ }
+ void amd_iommu_apply_erratum_63(u16 devid)
+ {
+ 	int sysmgt;
+ 	sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
+ 		 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
+ 	if (sysmgt == 0x01)
+ 		set_dev_entry_bit(devid, DEV_ENTRY_IW);
+ }
  /* Writes the specific IOMMU for a device into the rlookup table */
  static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
  {
@@ -547,6 +567,8 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
  if (flags & ACPI_DEVFLAG_LINT1)
  	set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
+ amd_iommu_apply_erratum_63(devid);
  set_iommu_for_device(iommu, devid);
  }

View File

@@ -846,7 +846,7 @@ int __init mtrr_cleanup(unsigned address_bits)
  sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
  range_sums = sum_ranges(range, nr_range);
- printk(KERN_INFO "total RAM coverred: %ldM\n",
+ printk(KERN_INFO "total RAM covered: %ldM\n",
         range_sums >> (20 - PAGE_SHIFT));
  if (mtrr_chunk_size && mtrr_gran_size) {

View File

@@ -664,3 +664,8 @@ long sys_arch_prctl(int code, unsigned long addr)
  return do_arch_prctl(current, code, addr);
  }
+ unsigned long KSTK_ESP(struct task_struct *task)
+ {
+ 	return (test_tsk_thread_flag(task, TIF_IA32)) ?
+ 			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
+ }

View File

@@ -436,6 +436,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
  		DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
  	},
  },
+ {	/* Handle problems with rebooting on Apple Macmini3,1 */
+ 	.callback = set_pci_reboot,
+ 	.ident = "Apple Macmini3,1",
+ 	.matches = {
+ 		DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
+ 	},
+ },
  { }
  };

View File

@@ -1692,7 +1692,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
  unsigned bank_num = mcg_cap & 0xff, bank;
  r = -EINVAL;
- if (!bank_num)
+ if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
  	goto out;
  if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
  	goto out;
@@ -4051,7 +4051,7 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
  return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
  }
- static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
+ static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
  			     struct desc_struct *seg_desc)
  {
  u32 base_addr = get_desc_base(seg_desc);

View File

@@ -178,6 +178,7 @@ static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
  static void xen_cpuid(unsigned int *ax, unsigned int *bx,
  		      unsigned int *cx, unsigned int *dx)
  {
+ unsigned maskebx = ~0;
  unsigned maskecx = ~0;
  unsigned maskedx = ~0;
@@ -185,9 +186,16 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
  * Mask out inconvenient features, to try and disable as many
  * unsupported kernel subsystems as possible.
  */
- if (*ax == 1) {
+ switch (*ax) {
+ case 1:
  	maskecx = cpuid_leaf1_ecx_mask;
  	maskedx = cpuid_leaf1_edx_mask;
+ 	break;
+ case 0xb:
+ 	/* Suppress extended topology stuff */
+ 	maskebx = 0;
+ 	break;
  }
  asm(XEN_EMULATE_PREFIX "cpuid"
@@ -197,6 +205,7 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
  	  "=d" (*dx)
  	: "0" (*ax), "2" (*cx));
+ *bx &= maskebx;
  *cx &= maskecx;
  *dx &= maskedx;
  }

View File

@@ -55,7 +55,7 @@ static inline void notify_daemon(void)
  notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
  }
- static int write_console(uint32_t vtermno, const char *data, int len)
+ static int __write_console(const char *data, int len)
  {
  struct xencons_interface *intf = xencons_interface();
  XENCONS_RING_IDX cons, prod;
@@ -76,6 +76,29 @@ static int write_console(uint32_t vtermno, const char *data, int len)
  return sent;
  }
+ static int write_console(uint32_t vtermno, const char *data, int len)
+ {
+ 	int ret = len;
+ 	/*
+ 	 * Make sure the whole buffer is emitted, polling if
+ 	 * necessary.  We don't ever want to rely on the hvc daemon
+ 	 * because the most interesting console output is when the
+ 	 * kernel is crippled.
+ 	 */
+ 	while (len) {
+ 		int sent = __write_console(data, len);
+ 		data += sent;
+ 		len -= sent;
+ 		if (unlikely(len))
+ 			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
+ 	}
+ 	return ret;
+ }
  static int read_console(uint32_t vtermno, char *buf, int len)
  {
  struct xencons_interface *intf = xencons_interface();

View File

@@ -2254,7 +2254,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
  {
  u32 ec = ERROR_CODE(info->nbsl);
  u32 xec = EXT_ERROR_CODE(info->nbsl);
- int ecc_type = info->nbsh & (0x3 << 13);
+ int ecc_type = (info->nbsh >> 13) & 0x3;
  /* Bail early out if this was an 'observed' error */
  if (PP(ec) == K8_NBSL_PP_OBS)
@@ -3163,7 +3163,7 @@ static int __init amd64_edac_init(void)
  opstate_init();
  if (cache_k8_northbridges() < 0)
- 	goto err_exit;
+ 	return err;
  err = pci_register_driver(&amd64_pci_driver);
  if (err)
@@ -3189,8 +3189,6 @@ static int __init amd64_edac_init(void)
  err_2nd_stage:
  debugf0("2nd stage failed\n");
- err_exit:
  pci_unregister_driver(&amd64_pci_driver);
  return err;

View File

@@ -1227,8 +1227,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
  	goto out;
  /* Try to set up FBC with a reasonable compressed buffer size */
- if (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev) || IS_GM45(dev)) &&
-     i915_powersave) {
+ if (I915_HAS_FBC(dev) && i915_powersave) {
  	int cfb_size;
  	/* Try to get an 8M buffer... */

View File

@@ -296,6 +296,12 @@ typedef struct drm_i915_private {
  u32 saveVBLANK_A;
  u32 saveVSYNC_A;
  u32 saveBCLRPAT_A;
+ u32 saveTRANS_HTOTAL_A;
+ u32 saveTRANS_HBLANK_A;
+ u32 saveTRANS_HSYNC_A;
+ u32 saveTRANS_VTOTAL_A;
+ u32 saveTRANS_VBLANK_A;
+ u32 saveTRANS_VSYNC_A;
  u32 savePIPEASTAT;
  u32 saveDSPASTRIDE;
  u32 saveDSPASIZE;
@@ -304,8 +310,11 @@
  u32 saveDSPASURF;
  u32 saveDSPATILEOFF;
  u32 savePFIT_PGM_RATIOS;
+ u32 saveBLC_HIST_CTL;
  u32 saveBLC_PWM_CTL;
  u32 saveBLC_PWM_CTL2;
+ u32 saveBLC_CPU_PWM_CTL;
+ u32 saveBLC_CPU_PWM_CTL2;
  u32 saveFPB0;
  u32 saveFPB1;
  u32 saveDPLL_B;
@@ -317,6 +326,12 @@
  u32 saveVBLANK_B;
  u32 saveVSYNC_B;
  u32 saveBCLRPAT_B;
+ u32 saveTRANS_HTOTAL_B;
+ u32 saveTRANS_HBLANK_B;
+ u32 saveTRANS_HSYNC_B;
+ u32 saveTRANS_VTOTAL_B;
+ u32 saveTRANS_VBLANK_B;
+ u32 saveTRANS_VSYNC_B;
  u32 savePIPEBSTAT;
  u32 saveDSPBSTRIDE;
  u32 saveDSPBSIZE;
@@ -342,6 +357,7 @@
  u32 savePFIT_CONTROL;
  u32 save_palette_a[256];
  u32 save_palette_b[256];
+ u32 saveDPFC_CB_BASE;
  u32 saveFBC_CFB_BASE;
  u32 saveFBC_LL_BASE;
  u32 saveFBC_CONTROL;
@@ -349,6 +365,12 @@
  u32 saveIER;
  u32 saveIIR;
  u32 saveIMR;
+ u32 saveDEIER;
+ u32 saveDEIMR;
+ u32 saveGTIER;
+ u32 saveGTIMR;
+ u32 saveFDI_RXA_IMR;
+ u32 saveFDI_RXB_IMR;
  u32 saveCACHE_MODE_0;
  u32 saveD_STATE;
  u32 saveDSPCLK_GATE_D;
@@ -382,6 +404,16 @@
  u32 savePIPEB_DP_LINK_M;
  u32 savePIPEA_DP_LINK_N;
  u32 savePIPEB_DP_LINK_N;
+ u32 saveFDI_RXA_CTL;
+ u32 saveFDI_TXA_CTL;
+ u32 saveFDI_RXB_CTL;
+ u32 saveFDI_TXB_CTL;
+ u32 savePFA_CTL_1;
+ u32 savePFB_CTL_1;
+ u32 savePFA_WIN_SZ;
+ u32 savePFB_WIN_SZ;
+ u32 savePFA_WIN_POS;
+ u32 savePFB_WIN_POS;
  struct {
  	struct drm_mm gtt_space;
@@ -492,6 +524,8 @@
  struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
  } mm;
  struct sdvo_device_mapping sdvo_mappings[2];
+ /* indicate whether the LVDS_BORDER should be enabled or not */
+ unsigned int lvds_border_bits;
  /* Reclocking support */
  bool render_reclock_avail;
@@ -981,7 +1015,10 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
  #define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
  #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
- #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && (IS_I9XX(dev) || IS_I965G(dev)))
+ #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
+ 			   (IS_I9XX(dev) || IS_GM45(dev)) && \
+ 			   !IS_IGD(dev) && \
+ 			   !IS_IGDNG(dev))
  #define PRIMARY_RINGBUFFER_SIZE (128*1024)

View File

@@ -968,6 +968,8 @@
  #define LVDS_PORT_EN (1 << 31)
  /* Selects pipe B for LVDS data. Must be set on pre-965. */
  #define LVDS_PIPEB_SELECT (1 << 30)
+ /* Enable border for unscaled (or aspect-scaled) display */
+ #define LVDS_BORDER_ENABLE (1 << 15)
  /*
   * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
   * pixel.
@@ -1078,6 +1080,8 @@
  #define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
  #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
+ #define BLC_HIST_CTL 0x61260
  /* TV port control */
  #define TV_CTL 0x68000
  /** Enables the TV encoder */
@@ -1780,6 +1784,11 @@
  #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
  #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
  #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
+ #define PIPE_BPC_MASK (7 << 5) /* Ironlake */
+ #define PIPE_8BPC (0 << 5)
+ #define PIPE_10BPC (1 << 5)
+ #define PIPE_6BPC (2 << 5)
+ #define PIPE_12BPC (3 << 5)
  #define DSPARB 0x70030
  #define DSPARB_CSTART_MASK (0x7f << 7)
@@ -1790,17 +1799,29 @@
  #define DSPARB_AEND_SHIFT 0
  #define DSPFW1 0x70034
+ #define DSPFW_SR_SHIFT 23
+ #define DSPFW_CURSORB_SHIFT 16
+ #define DSPFW_PLANEB_SHIFT 8
  #define DSPFW2 0x70038
+ #define DSPFW_CURSORA_MASK 0x00003f00
+ #define DSPFW_CURSORA_SHIFT 16
  #define DSPFW3 0x7003c
+ #define DSPFW_HPLL_SR_EN (1<<31)
+ #define DSPFW_CURSOR_SR_SHIFT 24
  #define IGD_SELF_REFRESH_EN (1<<30)
  /* FIFO watermark sizes etc */
+ #define G4X_FIFO_LINE_SIZE 64
  #define I915_FIFO_LINE_SIZE 64
  #define I830_FIFO_LINE_SIZE 32
+ #define G4X_FIFO_SIZE 127
  #define I945_FIFO_SIZE 127 /* 945 & 965 */
  #define I915_FIFO_SIZE 95
  #define I855GM_FIFO_SIZE 127 /* In cachelines */
  #define I830_FIFO_SIZE 95
+ #define G4X_MAX_WM 0x3f
  #define I915_MAX_WM 0x3f
  #define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
@@ -2030,6 +2051,11 @@
  #define PFA_CTL_1 0x68080
  #define PFB_CTL_1 0x68880
  #define PF_ENABLE (1<<31)
+ #define PF_FILTER_MASK (3<<23)
+ #define PF_FILTER_PROGRAMMED (0<<23)
+ #define PF_FILTER_MED_3x3 (1<<23)
+ #define PF_FILTER_EDGE_ENHANCE (2<<23)
+ #define PF_FILTER_EDGE_SOFTEN (3<<23)
  #define PFA_WIN_SZ 0x68074
  #define PFB_WIN_SZ 0x68874
  #define PFA_WIN_POS 0x68070
@@ -2149,11 +2175,11 @@
  #define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
  #define DREF_SSC_SOURCE_DISABLE (0<<11)
  #define DREF_SSC_SOURCE_ENABLE (2<<11)
- #define DREF_SSC_SOURCE_MASK (2<<11)
+ #define DREF_SSC_SOURCE_MASK (3<<11)
  #define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
  #define DREF_NONSPREAD_CK505_ENABLE (1<<9)
  #define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
- #define DREF_NONSPREAD_SOURCE_MASK (2<<9)
+ #define DREF_NONSPREAD_SOURCE_MASK (3<<9)
  #define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
  #define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
  #define DREF_SSC4_DOWNSPREAD (0<<6)

View File

@@ -32,11 +32,15 @@
  static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
  {
  struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpll_reg;
- if (pipe == PIPE_A)
- 	return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
- else
- 	return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+ if (IS_IGDNG(dev)) {
+ 	dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
+ } else {
+ 	dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
+ }
+ return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
  }
  static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
@@ -49,6 +53,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
  if (!i915_pipe_enabled(dev, pipe))
  	return;
+ if (IS_IGDNG(dev))
+ 	reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
  if (pipe == PIPE_A)
  	array = dev_priv->save_palette_a;
  else
@@ -68,6 +75,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
  if (!i915_pipe_enabled(dev, pipe))
  	return;
+ if (IS_IGDNG(dev))
+ 	reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
  if (pipe == PIPE_A)
  	array = dev_priv->save_palette_a;
  else
@@ -232,10 +242,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
  /* Pipe & plane A info */
  dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
  dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
- dev_priv->saveFPA0 = I915_READ(FPA0);
- dev_priv->saveFPA1 = I915_READ(FPA1);
- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
- if (IS_I965G(dev))
+ if (IS_IGDNG(dev)) {
+ 	dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
+ 	dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
+ 	dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
+ } else {
+ 	dev_priv->saveFPA0 = I915_READ(FPA0);
+ 	dev_priv->saveFPA1 = I915_READ(FPA1);
+ 	dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ }
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
  	dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
  dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
  dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -243,7 +259,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
  dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
  dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
  dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ if (!IS_IGDNG(dev))
+ 	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ if (IS_IGDNG(dev)) {
+ 	dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
+ 	dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
+ 	dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
+ 	dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
+ 	dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
+ 	dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
+ 	dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
+ 	dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
+ 	dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
+ 	dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
+ 	dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
+ }
  dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
  dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
@@ -260,10 +293,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
  /* Pipe & plane B info */
  dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
  dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
- dev_priv->saveFPB0 = I915_READ(FPB0);
- dev_priv->saveFPB1 = I915_READ(FPB1);
- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
- if (IS_I965G(dev))
+ if (IS_IGDNG(dev)) {
+ 	dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
+ 	dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
+ 	dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
+ } else {
+ 	dev_priv->saveFPB0 = I915_READ(FPB0);
+ 	dev_priv->saveFPB1 = I915_READ(FPB1);
+ 	dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ }
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
  	dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
  dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
  dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -271,7 +310,24 @@ static void i915_save_modeset_reg(struct drm_device *dev)
  dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
  dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
  dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ if (!IS_IGDNG(dev))
+ 	dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+ if (IS_IGDNG(dev)) {
+ 	dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
+ 	dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
+ 	dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
+ 	dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
+ 	dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
+ 	dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
+ 	dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
+ 	dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
+ 	dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
+ 	dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
+ 	dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
+ }
  dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
  dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
@@ -290,23 +346,41 @@
  static void i915_restore_modeset_reg(struct drm_device *dev)
  {
  struct drm_i915_private *dev_priv = dev->dev_private;
+ int dpll_a_reg, fpa0_reg, fpa1_reg;
+ int dpll_b_reg, fpb0_reg, fpb1_reg;
  if (drm_core_check_feature(dev, DRIVER_MODESET))
  	return;
+ if (IS_IGDNG(dev)) {
+ 	dpll_a_reg = PCH_DPLL_A;
+ 	dpll_b_reg = PCH_DPLL_B;
+ 	fpa0_reg = PCH_FPA0;
+ 	fpb0_reg = PCH_FPB0;
+ 	fpa1_reg = PCH_FPA1;
+ 	fpb1_reg = PCH_FPB1;
+ } else {
+ 	dpll_a_reg = DPLL_A;
+ 	dpll_b_reg = DPLL_B;
+ 	fpa0_reg = FPA0;
+ 	fpb0_reg = FPB0;
+ 	fpa1_reg = FPA1;
+ 	fpb1_reg = FPB1;
+ }
  /* Pipe & plane A info */
  /* Prime the clock */
  if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
- 	I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+ 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A &
  		   ~DPLL_VCO_ENABLE);
  	DRM_UDELAY(150);
  }
- I915_WRITE(FPA0, dev_priv->saveFPA0);
- I915_WRITE(FPA1, dev_priv->saveFPA1);
+ I915_WRITE(fpa0_reg, dev_priv->saveFPA0);
+ I915_WRITE(fpa1_reg, dev_priv->saveFPA1);
  /* Actually enable it */
- I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+ I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
  DRM_UDELAY(150);
- if (IS_I965G(dev))
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
  	I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
  DRM_UDELAY(150);
@@ -317,7 +391,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
  I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
  I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
  I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ if (!IS_IGDNG(dev))
+ 	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ if (IS_IGDNG(dev)) {
+ 	I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+ 	I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+ 	I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
+ 	I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+ 	I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ 	I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+ 	I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+ 	I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+ 	I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+ 	I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+ 	I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ }
  /* Restore plane info */
  I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
@@ -339,14 +430,14 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
  /* Pipe & plane B info */
  if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
- 	I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+ 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B &
  		   ~DPLL_VCO_ENABLE);
  	DRM_UDELAY(150);
  }
- I915_WRITE(FPB0, dev_priv->saveFPB0);
- I915_WRITE(FPB1, dev_priv->saveFPB1);
+ I915_WRITE(fpb0_reg, dev_priv->saveFPB0);
+ I915_WRITE(fpb1_reg, dev_priv->saveFPB1);
  /* Actually enable it */
- I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+ I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
  DRM_UDELAY(150);
  if (IS_I965G(dev))
  	I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
@@ -359,7 +450,24 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
  I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
  I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
  I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ if (!IS_IGDNG(dev))
+ 	I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ if (IS_IGDNG(dev)) {
+ 	I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+ 	I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+ 	I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
+ 	I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+ 	I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ 	I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+ 	I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+ 	I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+ 	I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+ 	I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+ 	I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ }
  /* Restore plane info */
  I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
@@ -404,21 +512,43 @@ void i915_save_display(struct drm_device *dev)
  dev_priv->saveCURSIZE = I915_READ(CURSIZE);
  /* CRT state */
- dev_priv->saveADPA = I915_READ(ADPA);
+ if (IS_IGDNG(dev)) {
+ 	dev_priv->saveADPA = I915_READ(PCH_ADPA);
+ } else {
+ 	dev_priv->saveADPA = I915_READ(ADPA);
+ }
  /* LVDS state */
- dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
- dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
- if (IS_I965G(dev))
- 	dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
- if (IS_MOBILE(dev) && !IS_I830(dev))
- 	dev_priv->saveLVDS = I915_READ(LVDS);
- if (!IS_I830(dev) && !IS_845G(dev))
+ if (IS_IGDNG(dev)) {
+ 	dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ 	dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ 	dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+ 	dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
+ 	dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
+ 	dev_priv->saveLVDS = I915_READ(PCH_LVDS);
+ } else {
+ 	dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+ 	dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ 	dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ 	dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
+ 	if (IS_I965G(dev))
+ 		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ 	if (IS_MOBILE(dev) && !IS_I830(dev))
+ 		dev_priv->saveLVDS = I915_READ(LVDS);
+ }
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
  	dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
- dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
- dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
- dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ if (IS_IGDNG(dev)) {
+ 	dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ 	dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ 	dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+ } else {
+ 	dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+ 	dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+ 	dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+ }
  /* Display Port state */
  if (SUPPORTS_INTEGRATED_DP(dev)) {
@@ -437,16 +567,23 @@ void i915_save_display(struct drm_device *dev)
  /* FIXME: save TV & SDVO state */
  /* FBC state */
- dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
- dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
- dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
- dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ if (IS_GM45(dev)) {
+ 	dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
+ } else {
+ 	dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ 	dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ 	dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ 	dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+ }
  /* VGA state */
  dev_priv->saveVGA0 = I915_READ(VGA0);
  dev_priv->saveVGA1 = I915_READ(VGA1);
  dev_priv->saveVGA_PD = I915_READ(VGA_PD);
- dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+ if (IS_IGDNG(dev))
+ 	dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ else
+ 	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
  i915_save_vga(dev);
  }
@@ -485,22 +622,41 @@ void i915_restore_display(struct drm_device *dev)
  I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
  /* CRT state */
- I915_WRITE(ADPA, dev_priv->saveADPA);
+ if (IS_IGDNG(dev))
+ 	I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
+ else
+ 	I915_WRITE(ADPA, dev_priv->saveADPA);
  /* LVDS state */
- if (IS_I965G(dev))
+ if (IS_I965G(dev) && !IS_IGDNG(dev))
  	I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
- if (IS_MOBILE(dev) && !IS_I830(dev))
+ if (IS_IGDNG(dev)) {
+ 	I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+ } else if (IS_MOBILE(dev) && !IS_I830(dev))
  	I915_WRITE(LVDS, dev_priv->saveLVDS);
- if (!IS_I830(dev) && !IS_845G(dev))
+ if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
  	I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
- I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
- I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
- I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
- I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
- I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
- I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ if (IS_IGDNG(dev)) {
+ 	I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
+ 	I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ 	I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ 	I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+ 	I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ 	I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ 	I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ 	I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ } else {
+ 	I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+ 	I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+ 	I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL);
+ 	I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+ 	I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+ 	I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+ 	I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+ }
  /* Display Port state */
  if (SUPPORTS_INTEGRATED_DP(dev)) {
@@ -511,13 +667,22 @@ void i915_restore_display(struct drm_device *dev)
  /* FIXME: restore TV & SDVO state */
  /* FBC info */
- I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
- I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
- I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
- I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ if (IS_GM45(dev)) {
+ 	g4x_disable_fbc(dev);
+ 	I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else {
+ 	i8xx_disable_fbc(dev);
+ 	I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ 	I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ 	I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ 	I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+ }
  /* VGA state */
- I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+ if (IS_IGDNG(dev))
+ 	I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ else
+ 	I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
  I915_WRITE(VGA0, dev_priv->saveVGA0);
  I915_WRITE(VGA1, dev_priv->saveVGA1);
  I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -543,8 +708,17 @@ int i915_save_state(struct drm_device *dev)
  i915_save_display(dev);
  /* Interrupt state */
- dev_priv->saveIER = I915_READ(IER);
- dev_priv->saveIMR = I915_READ(IMR);
+ if (IS_IGDNG(dev)) {
+ 	dev_priv->saveDEIER = I915_READ(DEIER);
+ 	dev_priv->saveDEIMR = I915_READ(DEIMR);
+ 	dev_priv->saveGTIER = I915_READ(GTIER);
+ 	dev_priv->saveGTIMR = I915_READ(GTIMR);
+ 	dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
+ 	dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+ } else {
+ 	dev_priv->saveIER = I915_READ(IER);
+ 	dev_priv->saveIMR = I915_READ(IMR);
+ }
  /* Clock gating state */
  dev_priv->saveD_STATE = I915_READ(D_STATE);
@@ -609,8 +783,17 @@ int i915_restore_state(struct drm_device *dev)
  i915_restore_display(dev);
  /* Interrupt state */
- I915_WRITE (IER, dev_priv->saveIER);
- I915_WRITE (IMR, dev_priv->saveIMR);
+ if (IS_IGDNG(dev)) {
+ 	I915_WRITE(DEIER, dev_priv->saveDEIER);
+ 	I915_WRITE(DEIMR, dev_priv->saveDEIMR);
+ 	I915_WRITE(GTIER, dev_priv->saveGTIER);
+ 	I915_WRITE(GTIMR, dev_priv->saveGTIMR);
+ 	I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+ 	I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ } else {
+ 	I915_WRITE (IER, dev_priv->saveIER);
+ 	I915_WRITE (IMR, dev_priv->saveIMR);
+ }
  /* Clock gating state */
  I915_WRITE (D_STATE, dev_priv->saveD_STATE);

View File

@@ -351,20 +351,18 @@ parse_driver_features(struct drm_i915_private *dev_priv,
  struct drm_device *dev = dev_priv->dev;
  struct bdb_driver_features *driver;
- /* set default for chips without eDP */
- if (!SUPPORTS_EDP(dev)) {
- 	dev_priv->edp_support = 0;
- 	return;
- }
  driver = find_section(bdb, BDB_DRIVER_FEATURES);
  if (!driver)
  	return;
- if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+ if (driver && SUPPORTS_EDP(dev) &&
+     driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
  	dev_priv->edp_support = 1;
+ } else {
+ 	dev_priv->edp_support = 0;
+ }
- if (driver->dual_frequency)
+ if (driver && driver->dual_frequency)
  	dev_priv->render_reclock_avail = true;
  }

View File

@ -943,6 +943,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
clock.p = (clock.p1 * clock.p2); clock.p = (clock.p1 * clock.p2);
clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
clock.vco = 0;
memcpy(best_clock, &clock, sizeof(intel_clock_t)); memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true; return true;
} }
@ -1260,9 +1261,11 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret; return ret;
} }
/* Pre-i965 needs to install a fence for tiled scan-out */ /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
if (!IS_I965G(dev) && * whereas 965+ only requires a fence if using framebuffer compression.
obj_priv->fence_reg == I915_FENCE_REG_NONE && * For simplicity, we always install a fence as the cost is not that onerous.
*/
if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
obj_priv->tiling_mode != I915_TILING_NONE) { obj_priv->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj); ret = i915_gem_object_get_fence_reg(obj);
if (ret != 0) { if (ret != 0) {
@ -1513,7 +1516,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Enable panel fitting for LVDS */ /* Enable panel fitting for LVDS */
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(pf_ctl_reg); temp = I915_READ(pf_ctl_reg);
I915_WRITE(pf_ctl_reg, temp | PF_ENABLE); I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
/* currently full aspect */ /* currently full aspect */
I915_WRITE(pf_win_pos, 0); I915_WRITE(pf_win_pos, 0);
@ -1801,6 +1804,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_ON: case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_SUSPEND:
intel_update_watermarks(dev);
/* Enable the DPLL */ /* Enable the DPLL */
temp = I915_READ(dpll_reg); temp = I915_READ(dpll_reg);
if ((temp & DPLL_VCO_ENABLE) == 0) { if ((temp & DPLL_VCO_ENABLE) == 0) {
@ -1838,7 +1843,6 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
/* Give the overlay scaler a chance to enable if it's on this pipe */ /* Give the overlay scaler a chance to enable if it's on this pipe */
//intel_crtc_dpms_video(crtc, true); TODO //intel_crtc_dpms_video(crtc, true); TODO
intel_update_watermarks(dev);
break; break;
case DRM_MODE_DPMS_OFF: case DRM_MODE_DPMS_OFF:
intel_update_watermarks(dev); intel_update_watermarks(dev);
@ -2082,7 +2086,7 @@ fdi_reduce_ratio(u32 *num, u32 *den)
#define LINK_N 0x80000 #define LINK_N 0x80000
static void static void
igdng_compute_m_n(int bytes_per_pixel, int nlanes, igdng_compute_m_n(int bits_per_pixel, int nlanes,
int pixel_clock, int link_clock, int pixel_clock, int link_clock,
struct fdi_m_n *m_n) struct fdi_m_n *m_n)
{ {
@ -2092,7 +2096,8 @@ igdng_compute_m_n(int bytes_per_pixel, int nlanes,
temp = (u64) DATA_N * pixel_clock; temp = (u64) DATA_N * pixel_clock;
temp = div_u64(temp, link_clock); temp = div_u64(temp, link_clock);
m_n->gmch_m = div_u64(temp * bytes_per_pixel, nlanes); m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
m_n->gmch_n = DATA_N; m_n->gmch_n = DATA_N;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
@ -2140,6 +2145,13 @@ static struct intel_watermark_params igd_cursor_hplloff_wm = {
IGD_CURSOR_GUARD_WM, IGD_CURSOR_GUARD_WM,
IGD_FIFO_LINE_SIZE IGD_FIFO_LINE_SIZE
}; };
static struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static struct intel_watermark_params i945_wm_info = { static struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE, I945_FIFO_SIZE,
I915_MAX_WM, I915_MAX_WM,
@ -2430,17 +2442,74 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size; return size;
} }
static void g4x_update_wm(struct drm_device *dev, int unused, int unused2, static void g4x_update_wm(struct drm_device *dev, int planea_clock,
int unused3, int unused4) int planeb_clock, int sr_hdisplay, int pixel_size)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 fw_blc_self = I915_READ(FW_BLC_SELF); int total_size, cacheline_size;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
struct intel_watermark_params planea_params, planeb_params;
unsigned long line_time_us;
int sr_clock, sr_entries = 0, entries_required;
if (i915_powersave) /* Create copies of the base settings for each pipe */
fw_blc_self |= FW_BLC_SELF_EN; planea_params = planeb_params = g4x_wm_info;
else
fw_blc_self &= ~FW_BLC_SELF_EN; /* Grab a couple of global values before we overwrite them */
I915_WRITE(FW_BLC_SELF, fw_blc_self); total_size = planea_params.fifo_size;
cacheline_size = planea_params.cacheline_size;
/*
* Note: we need to make sure we don't overflow for various clock &
* latency values.
* clocks go from a few thousand to several hundred thousand.
* latency is usually a few thousand
*/
entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
1000;
entries_required /= G4X_FIFO_LINE_SIZE;
planea_wm = entries_required + planea_params.guard_size;
entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
1000;
entries_required /= G4X_FIFO_LINE_SIZE;
planeb_wm = entries_required + planeb_params.guard_size;
cursora_wm = cursorb_wm = 16;
cursor_sr = 32;
DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
const static int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
sr_entries = (((sr_latency_ns / line_time_us) + 1) *
pixel_size * sr_hdisplay) / 1000;
sr_entries = roundup(sr_entries / cacheline_size, 1);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
planea_wm, planeb_wm, sr_entries);
planea_wm &= 0x3f;
planeb_wm &= 0x3f;
I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
} }
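The staged divisions above exist to keep the intermediate products inside 32 bits, as the in-code comment notes. A standalone sketch of the plane watermark calculation with invented numbers (the FIFO line size, guard value and latency are assumptions for illustration only):

#include <stdio.h>

#define FIFO_LINE_SIZE 64    /* bytes per FIFO line; assumed value          */
#define GUARD_SIZE      2    /* mirrors the guard field in g4x_wm_info above */

int main(void)
{
	int plane_clock = 148500;   /* pixel clock in kHz, invented            */
	int pixel_size  = 4;        /* bytes per pixel (XRGB8888)              */
	int latency_ns  = 5000;     /* memory latency to hide, assumed         */

	/* (kHz / 1000) = pixels per microsecond; multiplying by the latency
	 * in ns and dividing by 1000 again gives the bytes fetched during
	 * one latency window without overflowing 32 bits.                    */
	int entries = ((plane_clock / 1000) * pixel_size * latency_ns) / 1000;

	entries /= FIFO_LINE_SIZE;                /* bytes -> FIFO lines       */
	printf("plane watermark = %d lines\n", entries + GUARD_SIZE);
	return 0;
}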
static void i965_update_wm(struct drm_device *dev, int unused, int unused2, static void i965_update_wm(struct drm_device *dev, int unused, int unused2,
@ -2586,6 +2655,9 @@ static void intel_update_watermarks(struct drm_device *dev)
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0; int enabled = 0, pixel_size = 0;
if (!dev_priv->display.update_wm)
return;
/* Get the clock config from both planes */ /* Get the clock config from both planes */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc); intel_crtc = to_intel_crtc(crtc);
@ -2763,7 +2835,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */ /* FDI link */
if (IS_IGDNG(dev)) { if (IS_IGDNG(dev)) {
int lane, link_bw; int lane, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N /* eDP doesn't require FDI link, so just set DP M/N
according to current link config */ according to current link config */
if (is_edp) { if (is_edp) {
@ -2782,10 +2854,72 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
lane = 4; lane = 4;
link_bw = 270000; link_bw = 270000;
} }
igdng_compute_m_n(3, lane, target_clock,
/* determine panel color depth */
temp = I915_READ(pipeconf_reg);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
bpp = 24;
break;
case PIPE_10BPC:
bpp = 30;
break;
case PIPE_6BPC:
bpp = 18;
break;
case PIPE_12BPC:
bpp = 36;
break;
default:
DRM_ERROR("unknown pipe bpc value\n");
bpp = 24;
}
igdng_compute_m_n(bpp, lane, target_clock,
link_bw, &m_n); link_bw, &m_n);
} }
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
if (IS_IGDNG(dev)) {
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
temp |= DREF_NONSPREAD_SOURCE_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
temp &= ~DREF_SSC_SOURCE_MASK;
temp |= DREF_SSC_SOURCE_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
if (is_edp) {
if (dev_priv->lvds_use_ssc) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
} else {
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
}
}
}
if (IS_IGD(dev)) { if (IS_IGD(dev)) {
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock) if (has_reduced_clock)
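The panel colour depth switch earlier in this hunk is simply bits per channel times the three colour channels (6, 8, 10 or 12 bpc giving 18, 24, 30 or 36 bpp). A hedged helper-style restatement, using made-up enum values rather than the real PIPECONF bit encoding:

/* Illustration only: the pipe bpc field encodes bits per colour channel,
 * and the driver wants total bits per pixel, i.e. three channels' worth.
 * The enum values are placeholders, not the hardware encoding. */
enum pipe_bpc { PIPE_BPC_6 = 6, PIPE_BPC_8 = 8, PIPE_BPC_10 = 10, PIPE_BPC_12 = 12 };

static int pipe_bpc_to_bpp(enum pipe_bpc bpc)
{
	switch (bpc) {
	case PIPE_BPC_6:  return 6 * 3;    /* 18 bpp */
	case PIPE_BPC_8:  return 8 * 3;    /* 24 bpp */
	case PIPE_BPC_10: return 10 * 3;   /* 30 bpp */
	case PIPE_BPC_12: return 12 * 3;   /* 36 bpp */
	}
	return 24;                         /* same default as the switch above */
}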
@ -2936,6 +3070,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
lvds = I915_READ(lvds_reg); lvds = I915_READ(lvds_reg);
lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
/* set the corresponding LVDS_BORDER bit */
lvds |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to /* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not. * set the DPLLs for dual-channel mode or not.
*/ */
@ -4124,7 +4260,9 @@ void intel_init_clock_gating(struct drm_device *dev)
* Disable clock gating reported to work incorrectly according to the * Disable clock gating reported to work incorrectly according to the
* specs, but enable as much else as we can. * specs, but enable as much else as we can.
*/ */
if (IS_G4X(dev)) { if (IS_IGDNG(dev)) {
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate; uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0); I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@ -4212,7 +4350,9 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed; i830_get_display_clock_speed;
/* For FIFO watermark updates */ /* For FIFO watermark updates */
if (IS_G4X(dev)) if (IS_IGDNG(dev))
dev_priv->display.update_wm = NULL;
else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm; dev_priv->display.update_wm = g4x_update_wm;
else if (IS_I965G(dev)) else if (IS_I965G(dev))
dev_priv->display.update_wm = i965_update_wm; dev_priv->display.update_wm = i965_update_wm;


@ -400,7 +400,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
{ {
struct intel_dp_priv *dp_priv = intel_output->dev_priv; struct intel_dp_priv *dp_priv = intel_output->dev_priv;
DRM_ERROR("i2c_init %s\n", name); DRM_DEBUG_KMS("i2c_init %s\n", name);
dp_priv->algo.running = false; dp_priv->algo.running = false;
dp_priv->algo.address = 0; dp_priv->algo.address = 0;
dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch; dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;


@ -380,7 +380,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
adjusted_mode->crtc_vblank_start + vsync_pos; adjusted_mode->crtc_vblank_start + vsync_pos;
/* keep the vsync width constant */ /* keep the vsync width constant */
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_end =
adjusted_mode->crtc_vblank_start + vsync_width; adjusted_mode->crtc_vsync_start + vsync_width;
border = 1; border = 1;
break; break;
case DRM_MODE_SCALE_ASPECT: case DRM_MODE_SCALE_ASPECT:
@ -525,6 +525,14 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
out: out:
lvds_priv->pfit_control = pfit_control; lvds_priv->pfit_control = pfit_control;
lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
/*
* When a border is present, the corresponding LVDS border bit
* should be enabled.
*/
if (border)
dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
else
dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
/* /*
* XXX: It would be nice to support lower refresh rates on the * XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the * panels to reduce power consumption, and perhaps match the
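The one-character change above matters because the old expression anchored the end of the sync pulse to the start of the blanking period rather than to the start of the pulse itself. A worked example with invented timings:

/* Worked example (invented timings):
 *
 *   crtc_vblank_start = 768     last visible line
 *   vsync_pos         = 3       sync starts 3 lines into the blanking
 *   vsync_width       = 6       native sync pulse is 6 lines wide
 *
 *   crtc_vsync_start = 768 + 3 = 771
 *   new: crtc_vsync_end = 771 + 6 = 777   (pulse stays 6 lines wide)
 *   old: crtc_vsync_end = 768 + 6 = 774   (pulse shrinks to 3 lines)
 */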


@ -161,14 +161,15 @@ static int options_show(struct seq_file *s, void *p)
static ssize_t options_write(struct file *file, const char __user *userbuf, static ssize_t options_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *data) size_t count, loff_t *data)
{ {
unsigned long val; char buf[20];
char buf[80];
if (strncpy_from_user(buf, userbuf, sizeof(buf) - 1) < 0) if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, userbuf, count))
return -EFAULT; return -EFAULT;
buf[count - 1] = '\0'; buf[count] = '\0';
if (!strict_strtoul(buf, 10, &val)) if (strict_strtoul(buf, 0, &gru_options))
gru_options = val; return -EINVAL;
return count; return count;
} }
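The rewritten handler follows the usual pattern for small writable debugfs files: bound the size, copy exactly count bytes, NUL-terminate, then parse. A hedged, self-contained restatement of that pattern (the option variable and function name are invented; strict_strtoul was the validating strtoul helper of this kernel era):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static unsigned long example_options;   /* invented stand-in for gru_options */

static ssize_t example_options_write(struct file *file,
				     const char __user *userbuf,
				     size_t count, loff_t *ppos)
{
	char buf[20];

	if (count >= sizeof(buf))           /* leave room for the terminator */
		return -EINVAL;
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = '\0';

	if (strict_strtoul(buf, 0, &example_options))  /* base 0 accepts 0x.. */
		return -EINVAL;

	return count;                       /* consume the whole write */
}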


@ -486,6 +486,7 @@ config MTD_BFIN_ASYNC
config MTD_GPIO_ADDR config MTD_GPIO_ADDR
tristate "GPIO-assisted Flash Chip Support" tristate "GPIO-assisted Flash Chip Support"
depends on GENERIC_GPIO || GPIOLIB
depends on MTD_COMPLEX_MAPPINGS depends on MTD_COMPLEX_MAPPINGS
select MTD_PARTITIONS select MTD_PARTITIONS
help help


@ -13,7 +13,9 @@
* Licensed under the GPL-2 or later. * Licensed under the GPL-2 or later.
*/ */
#include <linux/gpio.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mtd/mtd.h> #include <linux/mtd/mtd.h>
@ -23,9 +25,6 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/types.h> #include <linux/types.h>
#include <asm/gpio.h>
#include <asm/io.h>
#define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); }) #define pr_devinit(fmt, args...) ({ static const __devinitconst char __fmt[] = fmt; printk(__fmt, ## args); })
#define DRIVER_NAME "gpio-addr-flash" #define DRIVER_NAME "gpio-addr-flash"


@ -761,6 +761,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
* @mtd: mtd info structure * @mtd: mtd info structure
* @chip: nand chip info structure * @chip: nand chip info structure
* @buf: buffer to store read data * @buf: buffer to store read data
* @page: page number to read
* *
* Not for syndrome calculating ecc controllers, which use a special oob layout * Not for syndrome calculating ecc controllers, which use a special oob layout
*/ */
@ -777,6 +778,7 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure * @mtd: mtd info structure
* @chip: nand chip info structure * @chip: nand chip info structure
* @buf: buffer to store read data * @buf: buffer to store read data
* @page: page number to read
* *
* We need a special oob layout and handling even when OOB isn't used. * We need a special oob layout and handling even when OOB isn't used.
*/ */
@ -818,6 +820,7 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd, struct nand_chip *c
* @mtd: mtd info structure * @mtd: mtd info structure
* @chip: nand chip info structure * @chip: nand chip info structure
* @buf: buffer to store read data * @buf: buffer to store read data
* @page: page number to read
*/ */
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int page) uint8_t *buf, int page)
@ -939,6 +942,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, uint3
* @mtd: mtd info structure * @mtd: mtd info structure
* @chip: nand chip info structure * @chip: nand chip info structure
* @buf: buffer to store read data * @buf: buffer to store read data
* @page: page number to read
* *
* Not for syndrome calculating ecc controllers which need a special oob layout * Not for syndrome calculating ecc controllers which need a special oob layout
*/ */
@ -983,6 +987,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure * @mtd: mtd info structure
* @chip: nand chip info structure * @chip: nand chip info structure
* @buf: buffer to store read data * @buf: buffer to store read data
* @page: page number to read
* *
* Hardware ECC for large page chips, require OOB to be read first. * Hardware ECC for large page chips, require OOB to be read first.
* For this ECC mode, the write_page method is re-used from ECC_HW. * For this ECC mode, the write_page method is re-used from ECC_HW.
@ -1031,6 +1036,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
* @mtd: mtd info structure * @mtd: mtd info structure
* @chip: nand chip info structure * @chip: nand chip info structure
* @buf: buffer to store read data * @buf: buffer to store read data
* @page: page number to read
* *
* The hw generator calculates the error syndrome automatically. Therefor * The hw generator calculates the error syndrome automatically. Therefor
* we need a special oob layout and handling. * we need a special oob layout and handling.


@ -1143,7 +1143,7 @@ static void serial_console_write(struct console *co, const char *s,
while ((sci_in(port, SCxSR) & bits) != bits) while ((sci_in(port, SCxSR) & bits) != bits)
cpu_relax(); cpu_relax();
if (sci_port->disable); if (sci_port->disable)
sci_port->disable(port); sci_port->disable(port);
} }
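The fix above removes a stray semicolon: the ';' after the if forms an empty statement, so the indented call ran unconditionally and could jump through a NULL sci_port->disable pointer. A tiny standalone illustration (names invented):

#include <stdio.h>

struct port {
	void (*disable)(struct port *p);   /* optional hook, may be NULL */
};

int main(void)
{
	struct port p = { .disable = NULL };

	/* Buggy form (do not use): the ';' is an empty statement, so the
	 * indented call would run unconditionally and dereference NULL:
	 *
	 *     if (p.disable);
	 *             p.disable(&p);
	 */

	/* Fixed form: the call is actually guarded. */
	if (p.disable)
		p.disable(&p);

	printf("disable hook %s\n", p.disable ? "called" : "skipped");
	return 0;
}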


@ -135,7 +135,7 @@ config TMPFS_POSIX_ACL
config HUGETLBFS config HUGETLBFS
bool "HugeTLB file system support" bool "HugeTLB file system support"
depends on X86 || IA64 || PPC_BOOK3S_64 || SPARC64 || (S390 && 64BIT) || \ depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \
SYS_SUPPORTS_HUGETLBFS || BROKEN SYS_SUPPORTS_HUGETLBFS || BROKEN
help help
hugetlbfs is a filesystem backing for HugeTLB pages, based on hugetlbfs is a filesystem backing for HugeTLB pages, based on


@ -1532,6 +1532,8 @@ int compat_do_execve(char * filename,
if (retval < 0) if (retval < 0)
goto out; goto out;
current->stack_start = current->mm->start_stack;
/* execve succeeded */ /* execve succeeded */
current->fs->in_exec = 0; current->fs->in_exec = 0;
current->in_execve = 0; current->in_execve = 0;


@ -712,8 +712,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
fuse_invalidate_attr(newdir); fuse_invalidate_attr(newdir);
/* newent will end up negative */ /* newent will end up negative */
if (newent->d_inode) if (newent->d_inode) {
fuse_invalidate_attr(newent->d_inode);
fuse_invalidate_entry_cache(newent); fuse_invalidate_entry_cache(newent);
}
} else if (err == -EINTR) { } else if (err == -EINTR) {
/* If request was interrupted, DEITY only knows if the /* If request was interrupted, DEITY only knows if the
rename actually took place. If the invalidation rename actually took place. If the invalidation


@ -1063,7 +1063,8 @@ ssize_t fuse_direct_io(struct file *file, const char __user *buf,
break; break;
} }
} }
fuse_put_request(fc, req); if (!IS_ERR(req))
fuse_put_request(fc, req);
if (res > 0) if (res > 0)
*ppos = pos; *ppos = pos;
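The added IS_ERR() check matters because the request pointer can hold an encoded errno rather than a real object when the retry loop exits early. A hedged sketch of the ERR_PTR()/IS_ERR() convention the fix relies on (types and function names invented):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_req { int id; };          /* invented stand-in for struct fuse_req */

static struct example_req *example_get_req(bool interrupted)
{
	if (interrupted)
		return ERR_PTR(-EINTR);  /* errno encoded in the pointer value */
	return kzalloc(sizeof(struct example_req), GFP_KERNEL);
}

static void example_use(void)
{
	struct example_req *req = example_get_req(true);

	/* ... work that may leave req holding an error from a re-allocation ... */

	if (!IS_ERR(req))                /* never free an ERR_PTR() value */
		kfree(req);
}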
@ -1599,7 +1600,7 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
kaddr += copy; kaddr += copy;
} }
kunmap(map); kunmap(page);
} }
return 0; return 0;


@ -21,6 +21,7 @@
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/security.h>
#include "sysfs.h" #include "sysfs.h"
DEFINE_MUTEX(sysfs_mutex); DEFINE_MUTEX(sysfs_mutex);
@ -285,6 +286,9 @@ void release_sysfs_dirent(struct sysfs_dirent * sd)
sysfs_put(sd->s_symlink.target_sd); sysfs_put(sd->s_symlink.target_sd);
if (sysfs_type(sd) & SYSFS_COPY_NAME) if (sysfs_type(sd) & SYSFS_COPY_NAME)
kfree(sd->s_name); kfree(sd->s_name);
if (sd->s_iattr && sd->s_iattr->ia_secdata)
security_release_secctx(sd->s_iattr->ia_secdata,
sd->s_iattr->ia_secdata_len);
kfree(sd->s_iattr); kfree(sd->s_iattr);
sysfs_free_ino(sd->s_ino); sysfs_free_ino(sd->s_ino);
kmem_cache_free(sysfs_dir_cachep, sd); kmem_cache_free(sysfs_dir_cachep, sd);


@ -149,29 +149,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
} }
EXPORT_SYMBOL(kthread_create); EXPORT_SYMBOL(kthread_create);
/**
* kthread_bind - bind a just-created kthread to a cpu.
* @k: thread created by kthread_create().
* @cpu: cpu (might not be online, must be possible) for @k to run on.
*
* Description: This function is equivalent to set_cpus_allowed(),
* except that @cpu doesn't need to be online, and the thread must be
* stopped (i.e., just returned from kthread_create()).
*/
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
/* Must have done schedule() in kthread() before we set_task_cpu */
if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
WARN_ON(1);
return;
}
set_task_cpu(k, cpu);
k->cpus_allowed = cpumask_of_cpu(cpu);
k->rt.nr_cpus_allowed = 1;
k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
/** /**
* kthread_stop - stop a thread created by kthread_create(). * kthread_stop - stop a thread created by kthread_create().
* @k: thread created by kthread_create(). * @k: thread created by kthread_create().


@ -1992,6 +1992,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
p->sched_class->prio_changed(rq, p, oldprio, running); p->sched_class->prio_changed(rq, p, oldprio, running);
} }
/**
* kthread_bind - bind a just-created kthread to a cpu.
* @k: thread created by kthread_create().
* @cpu: cpu (might not be online, must be possible) for @k to run on.
*
* Description: This function is equivalent to set_cpus_allowed(),
* except that @cpu doesn't need to be online, and the thread must be
* stopped (i.e., just returned from kthread_create()).
*
* Function lives here instead of kthread.c because it messes with
* scheduler internals which require locking.
*/
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
/* Must have done schedule() in kthread() before we set_task_cpu */
if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
WARN_ON(1);
return;
}
spin_lock_irqsave(&rq->lock, flags);
set_task_cpu(p, cpu);
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
spin_unlock_irqrestore(&rq->lock, flags);
}
EXPORT_SYMBOL(kthread_bind);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*
* Is this task likely cache-hot: * Is this task likely cache-hot:
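Moving kthread_bind() into sched.c does not change how it is used: a kthread is still created, bound while stopped, then woken. A hedged usage sketch with an invented worker function:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_worker(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);   /* placeholder per-cpu work */
	return 0;
}

static struct task_struct *example_start_on_cpu(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create(example_worker, NULL, "example/%u", cpu);
	if (IS_ERR(tsk))
		return tsk;

	kthread_bind(tsk, cpu);      /* bind while the thread is still stopped */
	wake_up_process(tsk);        /* only now let it run, pinned to cpu     */
	return tsk;
}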
@ -2004,7 +2036,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
/* /*
* Buddy candidates are cache hot: * Buddy candidates are cache hot:
*/ */
if (sched_feat(CACHE_HOT_BUDDY) && if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
(&p->se == cfs_rq_of(&p->se)->next || (&p->se == cfs_rq_of(&p->se)->next ||
&p->se == cfs_rq_of(&p->se)->last)) &p->se == cfs_rq_of(&p->se)->last))
return 1; return 1;
@ -9532,13 +9564,13 @@ void __init sched_init(void)
current->sched_class = &fair_sched_class; current->sched_class = &fair_sched_class;
/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ #ifdef CONFIG_NO_HZ
alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
#endif #endif
alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
#endif /* SMP */ #endif /* SMP */
perf_event_init(); perf_event_init();


@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
* re-elected due to buddy favours. * re-elected due to buddy favours.
*/ */
clear_buddies(cfs_rq, curr); clear_buddies(cfs_rq, curr);
return;
}
/*
* Ensure that a task that missed wakeup preemption by a
* narrow margin doesn't have to wait for a full slice.
* This also mitigates buddy induced latencies under load.
*/
if (!sched_feat(WAKEUP_PREEMPT))
return;
if (delta_exec < sysctl_sched_min_granularity)
return;
if (cfs_rq->nr_running > 1) {
struct sched_entity *se = __pick_next_entity(cfs_rq);
s64 delta = curr->vruntime - se->vruntime;
if (delta > ideal_runtime)
resched_task(rq_of(cfs_rq)->curr);
} }
} }
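The added check compares how far current's vruntime has pulled ahead of the leftmost queued entity against current's ideal slice. A small worked sketch with invented nanosecond values:

#include <stdio.h>

int main(void)
{
	long long ideal_runtime = 4000000;                 /* 4 ms slice, invented   */
	long long curr_vruntime = 106000000;
	long long left_vruntime = 101500000;               /* leftmost queued entity */
	long long delta = curr_vruntime - left_vruntime;   /* 4.5 ms                 */

	/* Mirrors the added check: once curr is more than one ideal slice
	 * ahead of the leftmost task, reschedule instead of letting buddy
	 * favours keep it on the CPU.                                       */
	if (delta > ideal_runtime)
		printf("resched: %lld ns ahead\n", delta);
	else
		printf("keep running curr\n");
	return 0;
}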
@ -861,21 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{ {
struct sched_entity *se = __pick_next_entity(cfs_rq); struct sched_entity *se = __pick_next_entity(cfs_rq);
struct sched_entity *buddy; struct sched_entity *left = se;
if (cfs_rq->next) { if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
buddy = cfs_rq->next; se = cfs_rq->next;
cfs_rq->next = NULL;
if (wakeup_preempt_entity(buddy, se) < 1)
return buddy;
}
if (cfs_rq->last) { /*
buddy = cfs_rq->last; * Prefer last buddy, try to return the CPU to a preempted task.
cfs_rq->last = NULL; */
if (wakeup_preempt_entity(buddy, se) < 1) if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
return buddy; se = cfs_rq->last;
}
clear_buddies(cfs_rq, se);
return se; return se;
} }
@ -1577,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
struct sched_entity *se = &curr->se, *pse = &p->se; struct sched_entity *se = &curr->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(curr); struct cfs_rq *cfs_rq = task_cfs_rq(curr);
int sync = wake_flags & WF_SYNC; int sync = wake_flags & WF_SYNC;
int scale = cfs_rq->nr_running >= sched_nr_latency;
update_curr(cfs_rq); update_curr(cfs_rq);
@ -1591,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
if (unlikely(se == pse)) if (unlikely(se == pse))
return; return;
/* if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
* Only set the backward buddy when the current task is still on the
* rq. This can happen when a wakeup gets interleaved with schedule on
* the ->pre_schedule() or idle_balance() point, either of which can
* drop the rq lock.
*
* Also, during early boot the idle thread is in the fair class, for
* obvious reasons its a bad idea to schedule back to the idle thread.
*/
if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
set_last_buddy(se);
if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
set_next_buddy(pse); set_next_buddy(pse);
/* /*
@ -1648,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
BUG_ON(!pse); BUG_ON(!pse);
if (wakeup_preempt_entity(se, pse) == 1) if (wakeup_preempt_entity(se, pse) == 1) {
resched_task(curr); resched_task(curr);
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
* with schedule on the ->pre_schedule() or idle_balance()
* point, either of which can drop the rq lock.
*
* Also, during early boot the idle thread is in the fair class,
* for obvious reasons its a bad idea to schedule back to it.
*/
if (unlikely(!se->on_rq || curr == rq->idle))
return;
if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
set_last_buddy(se);
}
} }
static struct task_struct *pick_next_task_fair(struct rq *rq) static struct task_struct *pick_next_task_fair(struct rq *rq)


@ -2222,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
ret = ftrace_process_regex(parser->buffer, ret = ftrace_process_regex(parser->buffer,
parser->idx, enable); parser->idx, enable);
if (ret) if (ret)
goto out; goto out_unlock;
trace_parser_clear(parser); trace_parser_clear(parser);
} }
ret = read; ret = read;
out_unlock:
mutex_unlock(&ftrace_regex_lock); mutex_unlock(&ftrace_regex_lock);
out:
return ret; return ret;
} }


@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
atomic_inc(&cpu_buffer->record_disabled); atomic_inc(&cpu_buffer->record_disabled);
synchronize_sched(); synchronize_sched();
spin_lock_irq(&cpu_buffer->reader_lock);
rb_head_page_deactivate(cpu_buffer); rb_head_page_deactivate(cpu_buffer);
for (i = 0; i < nr_pages; i++) { for (i = 0; i < nr_pages; i++) {
@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
return; return;
rb_reset_cpu(cpu_buffer); rb_reset_cpu(cpu_buffer);
spin_unlock_irq(&cpu_buffer->reader_lock);
rb_check_pages(cpu_buffer); rb_check_pages(cpu_buffer);


@ -819,9 +819,11 @@ static void check_unmap(struct dma_debug_entry *ref)
err_printk(ref->dev, entry, "DMA-API: device driver frees " err_printk(ref->dev, entry, "DMA-API: device driver frees "
"DMA memory with different CPU address " "DMA memory with different CPU address "
"[device address=0x%016llx] [size=%llu bytes] " "[device address=0x%016llx] [size=%llu bytes] "
"[cpu alloc address=%p] [cpu free address=%p]", "[cpu alloc address=0x%016llx] "
"[cpu free address=0x%016llx]",
ref->dev_addr, ref->size, ref->dev_addr, ref->size,
(void *)entry->paddr, (void *)ref->paddr); (unsigned long long)entry->paddr,
(unsigned long long)ref->paddr);
} }
if (ref->sg_call_ents && ref->type == dma_debug_sg && if (ref->sg_call_ents && ref->type == dma_debug_sg &&