Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
commit cbb7fe129b

.gitignore
@ -49,6 +49,7 @@ include/linux/compile.h
include/linux/version.h
include/linux/utsrelease.h
include/linux/bounds.h
include/generated

# stgit generated dirs
patches-*
@ -316,6 +316,16 @@ more details, with real examples.
#arch/m68k/fpsp040/Makefile
ldflags-y := -x

subdir-ccflags-y, subdir-asflags-y
The two flags listed above are similar to ccflags-y and asflags-y.
The difference is that the subdir- variants have effect for the kbuild
file where they are present and all its subdirectories.
Options specified using subdir-* are added to the command line before
the options specified using the non-subdir variants.

Example:
subdir-ccflags-y := -Werror

CFLAGS_$@, AFLAGS_$@

CFLAGS_$@ and AFLAGS_$@ only apply to commands in current
Documentation/lguest/.gitignore (new file)

@ -0,0 +1 @@
lguest
@ -3,11 +3,11 @@
/, /` - or, A Young Coder's Illustrated Hypervisor
\\"--\\ http://lguest.ozlabs.org

Lguest is designed to be a minimal hypervisor for the Linux kernel, for
Linux developers and users to experiment with virtualization with the
minimum of complexity. Nonetheless, it should have sufficient
features to make it useful for specific tasks, and, of course, you are
encouraged to fork and enhance it (see drivers/lguest/README).
Lguest is designed to be a minimal 32-bit x86 hypervisor for the Linux kernel,
for Linux developers and users to experiment with virtualization with the
minimum of complexity. Nonetheless, it should have sufficient features to
make it useful for specific tasks, and, of course, you are encouraged to fork
and enhance it (see drivers/lguest/README).

Features:

@ -37,6 +37,7 @@ Running Lguest:
"Paravirtualized guest support" = Y
"Lguest guest support" = Y
"High Memory Support" = off/4GB
"PAE (Physical Address Extension) Support" = N
"Alignment value to which kernel should be aligned" = 0x100000
(CONFIG_PARAVIRT=y, CONFIG_LGUEST_GUEST=y, CONFIG_HIGHMEM64G=n and
CONFIG_PHYSICAL_ALIGN=0x100000)
MAINTAINERS

@ -1287,6 +1287,14 @@ S: Maintained
F: Documentation/video4linux/bttv/
F: drivers/media/video/bt8xx/bttv*

CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
P: David Howells
M: dhowells@redhat.com
L: linux-cachefs@redhat.com
S: Supported
F: Documentation/filesystems/caching/cachefiles.txt
F: fs/cachefiles/

CAFE CMOS INTEGRATED CAMERA CONTROLLER DRIVER
P: Jonathan Corbet
M: corbet@lwn.net

@ -2325,6 +2333,15 @@ F: Documentation/power/freezing-of-tasks.txt
F: include/linux/freezer.h
F: kernel/freezer.c

FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
P: David Howells
M: dhowells@redhat.com
L: linux-cachefs@redhat.com
S: Supported
F: Documentation/filesystems/caching/
F: fs/fscache/
F: include/linux/fscache*.h

FTRACE
P: Steven Rostedt
M: rostedt@goodmis.org

@ -5235,7 +5252,12 @@ M: perex@perex.cz
P: Takashi Iwai
M: tiwai@suse.de
L: alsa-devel@alsa-project.org (subscribers-only)
W: http://www.alsa-project.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound-2.6.git
T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: Documentation/sound/
F: include/sound/
F: sound/

SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
Makefile

@ -1200,7 +1200,7 @@ CLEAN_FILES += vmlinux System.map \
.tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map

# Directories & files removed with 'make mrproper'
MRPROPER_DIRS += include/config include2 usr/include
MRPROPER_DIRS += include/config include2 usr/include include/generated
MRPROPER_FILES += .config .config.old include/asm .version .old_version \
include/linux/autoconf.h include/linux/version.h \
include/linux/utsrelease.h \
@ -46,7 +46,6 @@
#include <asm/io.h>

#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#include <asm/pgtable.h>
#endif
@ -5,7 +5,6 @@
#define LHCALL_FLUSH_ASYNC 0
#define LHCALL_LGUEST_INIT 1
#define LHCALL_SHUTDOWN 2
#define LHCALL_LOAD_GDT 3
#define LHCALL_NEW_PGTABLE 4
#define LHCALL_FLUSH_TLB 5
#define LHCALL_LOAD_IDT_ENTRY 6

@ -17,6 +16,7 @@
#define LHCALL_SET_PMD 15
#define LHCALL_LOAD_TLS 16
#define LHCALL_NOTIFY 17
#define LHCALL_LOAD_GDT_ENTRY 18

#define LGUEST_TRAP_ENTRY 0x1F
@ -273,15 +273,15 @@ static void lguest_load_idt(const struct desc_ptr *desc)
* controls the entire thing and the Guest asks it to make changes using the
* LOAD_GDT hypercall.
*
* This is the opposite of the IDT code where we have a LOAD_IDT_ENTRY
* hypercall and use that repeatedly to load a new IDT. I don't think it
* really matters, but wouldn't it be nice if they were the same? Wouldn't
* it be even better if you were the one to send the patch to fix it?
* This is the exactly like the IDT code.
*/
static void lguest_load_gdt(const struct desc_ptr *desc)
{
BUG_ON((desc->size + 1) / 8 != GDT_ENTRIES);
kvm_hypercall2(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES);
unsigned int i;
struct desc_struct *gdt = (void *)desc->address;

for (i = 0; i < (desc->size+1)/8; i++)
kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b);
}

/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,

@ -291,7 +291,9 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
const void *desc, int type)
{
native_write_gdt_entry(dt, entrynum, desc, type);
kvm_hypercall2(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES);
/* Tell Host about this new entry. */
kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum,
dt[entrynum].a, dt[entrynum].b);
}

/* OK, I lied. There are three "thread local storage" GDT entries which change
@ -300,9 +300,9 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
static struct platform_suspend_ops acpi_suspend_ops = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin,
.prepare = acpi_pm_prepare,
.prepare_late = acpi_pm_prepare,
.enter = acpi_suspend_enter,
.finish = acpi_pm_finish,
.wake = acpi_pm_finish,
.end = acpi_pm_end,
};

@ -328,9 +328,9 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
static struct platform_suspend_ops acpi_suspend_ops_old = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin_old,
.prepare = acpi_pm_disable_gpes,
.prepare_late = acpi_pm_disable_gpes,
.enter = acpi_suspend_enter,
.finish = acpi_pm_finish,
.wake = acpi_pm_finish,
.end = acpi_pm_end,
.recover = acpi_pm_finish,
};
@ -891,7 +891,8 @@ int device_add(struct device *dev)
set_dev_node(dev, dev_to_node(parent));

/* first, register with generic layer. */
error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev_name(dev));
/* we require the name to be set before, and pass NULL */
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
if (error)
goto Error;
@ -1226,7 +1226,7 @@ int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *m
int i, ret = -ENOMEM;

for (i = 0; i < num_pages; i++) {
page = alloc_page(GFP_KERNEL | GFP_DMA32);
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
/* agp_free_memory() needs gart address */
if (page == NULL)
goto out;

@ -1257,7 +1257,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
struct page * page;

page = alloc_page(GFP_KERNEL | GFP_DMA32);
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return NULL;
@ -2274,7 +2274,7 @@ rescan_last_byte:
continue; /* nothing to display */
}
/* Glyph not found */
if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
/* In legacy mode use the glyph we get by a 1:1 mapping.
This would make absolutely no sense with Unicode in mind,
but do this for ASCII characters since a font may lack
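For context, a minimal standalone sketch (not the console driver itself) of the corrected fallback test above: with the original "&&", a plain ASCII character on a UTF-8 console could never fall back to the 1:1 legacy glyph mapping when its glyph was missing; with "||" it can. The helper below is hypothetical and only mirrors the condition.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the fixed condition: may a character whose
 * glyph was not found still use the direct 1:1 mapping? */
static bool use_legacy_glyph(bool utf, bool disp_ctrl, unsigned int c,
                             unsigned int charmask)
{
        /* fixed form: "||" lets ASCII (c < 128) through even in UTF-8 mode */
        return (!(utf && !disp_ctrl) || c < 128) && !(c & ~charmask);
}

int main(void)
{
        /* UTF-8 console, control glyphs off, plain 'A': prints 1 (allowed) */
        printf("%d\n", use_legacy_glyph(true, false, 'A', 0xff));
        return 0;
}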
@ -159,6 +159,9 @@ void drm_master_put(struct drm_master **master)
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (file_priv->is_master)
return 0;

if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
return -EINVAL;

@ -169,6 +172,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
file_priv->minor->master != file_priv->master) {
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
file_priv->is_master = 1;
mutex_unlock(&dev->struct_mutex);
}

@ -178,10 +182,15 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
if (!file_priv->master)
if (!file_priv->is_master)
return -EINVAL;

if (!file_priv->minor->master)
return -EINVAL;

mutex_lock(&dev->struct_mutex);
drm_master_put(&file_priv->minor->master);
file_priv->is_master = 0;
mutex_unlock(&dev->struct_mutex);
return 0;
}
@ -132,6 +132,7 @@ void drm_sysfs_destroy(void)
*/
static void drm_sysfs_device_release(struct device *dev)
{
memset(dev, 0, sizeof(struct device));
return;
}
@ -481,11 +481,13 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
{
int count = 10000000;

while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
;

while (count-- && (VIA_READ(VIA_REG_STATUS) &
while (count && (VIA_READ(VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY))) ;
VIA_3D_ENG_BUSY)))
--count;
return count;
}
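As a side note on the count-- to --count change above, here is a small standalone sketch (not driver code) of why it matters for a polling loop: with post-decrement in the condition, a timeout leaves the counter at -1, while the pre-decrement form leaves it at 0, so a caller can treat a zero return value as a timeout.

#include <stdio.h>

/* "busy" stands in for the hardware status test in the real loop */
static int poll_post_decrement(int busy)
{
        int count = 5;

        while (busy && count--)
                ;               /* exits with count == -1 on timeout */
        return count;
}

static int poll_pre_decrement(int busy)
{
        int count = 5;

        while (busy && --count)
                ;               /* exits with count == 0 on timeout */
        return count;
}

int main(void)
{
        printf("post: %d, pre: %d\n",
               poll_post_decrement(1), poll_pre_decrement(1));
        return 0;
}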
@ -705,7 +707,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
switch (d_siz->func) {
case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
&& count--) {
&& --count) {
if (!d_siz->wait) {
break;
}

@ -717,7 +719,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
break;
case VIA_CMDBUF_LAG:
while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
&& count--) {
&& --count) {
if (!d_siz->wait) {
break;
}
@ -158,7 +158,8 @@ void free_interrupts(void);
/* segments.c: */
void setup_default_gdt_entries(struct lguest_ro_state *state);
void setup_guest_gdt(struct lg_cpu *cpu);
void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num);
void load_guest_gdt_entry(struct lg_cpu *cpu, unsigned int i,
u32 low, u32 hi);
void guest_load_tls(struct lg_cpu *cpu, unsigned long tls_array);
void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt);
void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt);
@ -144,18 +144,19 @@ void copy_gdt(const struct lg_cpu *cpu, struct desc_struct *gdt)
gdt[i] = cpu->arch.gdt[i];
}

/*H:620 This is where the Guest asks us to load a new GDT (LHCALL_LOAD_GDT).
* We copy it from the Guest and tweak the entries. */
void load_guest_gdt(struct lg_cpu *cpu, unsigned long table, u32 num)
/*H:620 This is where the Guest asks us to load a new GDT entry
* (LHCALL_LOAD_GDT_ENTRY). We tweak the entry and copy it in. */
void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi)
{
/* We assume the Guest has the same number of GDT entries as the
* Host, otherwise we'd have to dynamically allocate the Guest GDT. */
if (num > ARRAY_SIZE(cpu->arch.gdt))
kill_guest(cpu, "too many gdt entries %i", num);

/* We read the whole thing in, then fix it up. */
__lgread(cpu, cpu->arch.gdt, table, num * sizeof(cpu->arch.gdt[0]));
fixup_gdt_table(cpu, 0, ARRAY_SIZE(cpu->arch.gdt));
/* Set it up, then fix it. */
cpu->arch.gdt[num].a = lo;
cpu->arch.gdt[num].b = hi;
fixup_gdt_table(cpu, num, num+1);
/* Mark that the GDT changed so the core knows it has to copy it again,
* even if the Guest is run on the same CPU. */
cpu->changed |= CHANGED_GDT;
@ -324,6 +324,11 @@ static void rewrite_hypercall(struct lg_cpu *cpu)
u8 insn[3] = {0xcd, 0x1f, 0x90};

__lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn));
/* The above write might have caused a copy of that page to be made
* (if it was read-only). We need to make sure the Guest has
* up-to-date pagetables. As this doesn't happen often, we can just
* drop them all. */
guest_pagetable_clear_all(cpu);
}

static bool is_hypercall(struct lg_cpu *cpu)

@ -563,8 +568,8 @@ void __exit lguest_arch_host_fini(void)
int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
{
switch (args->arg0) {
case LHCALL_LOAD_GDT:
load_guest_gdt(cpu, args->arg1, args->arg2);
case LHCALL_LOAD_GDT_ENTRY:
load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3);
break;
case LHCALL_LOAD_IDT_ENTRY:
load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3);
@ -1479,6 +1479,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
s += blocks;
}
bitmap->last_end_sync = jiffies;
sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}

static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)

@ -1589,7 +1590,7 @@ void bitmap_destroy(mddev_t *mddev)
int bitmap_create(mddev_t *mddev)
{
struct bitmap *bitmap;
unsigned long blocks = mddev->resync_max_sectors;
sector_t blocks = mddev->resync_max_sectors;
unsigned long chunks;
unsigned long pages;
struct file *file = mddev->bitmap_file;

@ -1631,8 +1632,8 @@ int bitmap_create(mddev_t *mddev)
bitmap->chunkshift = ffz(~bitmap->chunksize);

/* now that chunksize and chunkshift are set, we can use these macros */
chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) /
CHUNK_BLOCK_RATIO(bitmap);
chunks = (blocks + CHUNK_BLOCK_RATIO(bitmap) - 1) >>
CHUNK_BLOCK_SHIFT(bitmap);
pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;

BUG_ON(!pages);
@ -2017,6 +2017,8 @@ repeat:
clear_bit(MD_CHANGE_PENDING, &mddev->flags);
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");

}

@ -2086,6 +2088,7 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
* -writemostly - clears write_mostly
* blocked - sets the Blocked flag
* -blocked - clears the Blocked flag
* insync - sets Insync providing device isn't active
*/
int err = -EINVAL;
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {

@ -2117,6 +2120,9 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);

err = 0;
} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
set_bit(In_sync, &rdev->flags);
err = 0;
}
if (!err && rdev->sysfs_state)

@ -2190,7 +2196,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
} else if (rdev->mddev->pers) {
mdk_rdev_t *rdev2;
/* Activating a spare .. or possibly reactivating
* if we every get bitmaps working here.
* if we ever get bitmaps working here.
*/

if (rdev->raid_disk != -1)

@ -3482,12 +3488,15 @@ sync_completed_show(mddev_t *mddev, char *page)
{
unsigned long max_sectors, resync;

if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return sprintf(page, "none\n");

if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
max_sectors = mddev->resync_max_sectors;
else
max_sectors = mddev->dev_sectors;

resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active));
resync = mddev->curr_resync_completed;
return sprintf(page, "%lu / %lu\n", resync, max_sectors);
}

@ -6334,18 +6343,13 @@ void md_do_sync(mddev_t *mddev)
sector_t sectors;

skipped = 0;
if (j >= mddev->resync_max) {
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wait_event(mddev->recovery_wait,
mddev->resync_max > j
|| kthread_should_stop());
}
if (kthread_should_stop())
goto interrupted;

if (mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) {
if ((mddev->curr_resync > mddev->curr_resync_completed &&
(mddev->curr_resync - mddev->curr_resync_completed)
> (max_sectors >> 4)) ||
(j - mddev->curr_resync_completed)*2
>= mddev->resync_max - mddev->curr_resync_completed
) {
/* time to update curr_resync_completed */
blk_unplug(mddev->queue);
wait_event(mddev->recovery_wait,

@ -6353,7 +6357,17 @@ void md_do_sync(mddev_t *mddev)
mddev->curr_resync_completed =
mddev->curr_resync;
set_bit(MD_CHANGE_CLEAN, &mddev->flags);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}

if (j >= mddev->resync_max)
wait_event(mddev->recovery_wait,
mddev->resync_max > j
|| kthread_should_stop());

if (kthread_should_stop())
goto interrupted;

sectors = mddev->pers->sync_request(mddev, j, &skipped,
currspeed < speed_min(mddev));
if (sectors == 0) {

@ -6461,6 +6475,7 @@ void md_do_sync(mddev_t *mddev)

skip:
mddev->curr_resync = 0;
mddev->curr_resync_completed = 0;
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
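To make the new checkpoint condition in md_do_sync() above easier to follow, here is a small standalone sketch (types simplified, not kernel code): curr_resync_completed is refreshed either when more than 1/16 of the device has been synced since the last checkpoint, or when the current position j has covered at least half of the remaining distance to resync_max, so a waiter on "sync_completed" is notified before the sync stalls at resync_max.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

static bool should_update_checkpoint(sector_t curr_resync,
                                     sector_t curr_resync_completed,
                                     sector_t resync_max,
                                     sector_t max_sectors, sector_t j)
{
        return (curr_resync > curr_resync_completed &&
                curr_resync - curr_resync_completed > (max_sectors >> 4)) ||
               (j - curr_resync_completed) * 2 >=
                        resync_max - curr_resync_completed;
}

int main(void)
{
        /* j = 1500 is halfway from the last checkpoint (1000) to
         * resync_max (2000), so the checkpoint is updated: prints 1 */
        printf("%d\n", (int)should_update_checkpoint(1200, 1000, 2000,
                                                     1ULL << 20, 1500));
        return 0;
}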
@ -12,10 +12,17 @@
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_K_H
#define _MD_K_H
#ifndef _MD_MD_H
#define _MD_MD_H

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

@ -408,10 +415,6 @@ static inline void safe_put_page(struct page *p)
if (p) put_page(p);
}

#endif /* CONFIG_BLOCK */
#endif


extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),

@ -434,3 +437,5 @@ extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);

#endif /* _MD_MD_H */
@ -3845,6 +3845,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes)==0);
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = mddev->curr_resync;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);

@ -3854,6 +3855,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}

if (mddev->delta_disks < 0) {

@ -3938,11 +3940,13 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* then we need to write out the superblock.
*/
sector_nr += reshape_sectors;
if (sector_nr >= mddev->resync_max) {
if ((sector_nr - mddev->curr_resync_completed) * 2
>= mddev->resync_max - mddev->curr_resync_completed) {
/* Cannot proceed until we've updated the superblock... */
wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes) == 0);
mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = mddev->curr_resync;
conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread);

@ -3953,6 +3957,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
conf->reshape_safe = mddev->reshape_position;
spin_unlock_irq(&conf->device_lock);
wake_up(&conf->wait_for_overlap);
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
return reshape_sectors;
}
@ -190,7 +190,8 @@ static int balloon(void *_vballoon)
try_to_freeze();
wait_event_interruptible(vb->config_change,
(diff = towards_target(vb)) != 0
|| kthread_should_stop());
|| kthread_should_stop()
|| freezing(current));
if (diff > 0)
fill_balloon(vb, diff);
else if (diff < 0)
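The balloon() change above only adds freezing(current) to the wait condition; below is a rough, hypothetical skeleton of the pattern it fixes (my_wq and my_work_pending() are placeholders, not driver symbols): a freezable kthread has to be woken out of its wait when the freezer runs, otherwise the try_to_freeze() at the top of the loop is never reached during suspend.

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static int my_work_pending(void)
{
        return 0;       /* placeholder for the real "work to do" test */
}

static int my_thread(void *unused)
{
        set_freezable();
        while (!kthread_should_stop()) {
                try_to_freeze();
                wait_event_interruptible(my_wq,
                                         my_work_pending()
                                         || kthread_should_stop()
                                         || freezing(current));
                /* ... handle the work here ... */
        }
        return 0;
}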
@ -1920,8 +1920,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
if (data_page)
((char *)data_page)[PAGE_SIZE - 1] = 0;

/* Default to relatime */
mnt_flags |= MNT_RELATIME;
/* Default to relatime unless overriden */
if (!(flags & MS_NOATIME))
mnt_flags |= MNT_RELATIME;

/* Separate the per-mountpoint flags */
if (flags & MS_NOSUID)
@ -21,6 +21,8 @@ extern long prctl_set_seccomp(unsigned long);

#else /* CONFIG_SECCOMP */

#include <linux/errno.h>

typedef struct { } seccomp_t;

#define secure_computing(x) do { } while (0)
@ -58,10 +58,17 @@ typedef int __bitwise suspend_state_t;
* by @begin().
* @prepare() is called right after devices have been suspended (ie. the
* appropriate .suspend() method has been executed for each device) and
* before the nonboot CPUs are disabled (it is executed with IRQs enabled).
* This callback is optional. It returns 0 on success or a negative
* error code otherwise, in which case the system cannot enter the desired
* sleep state (@enter() and @finish() will not be called in that case).
* before device drivers' late suspend callbacks are executed. It returns
* 0 on success or a negative error code otherwise, in which case the
* system cannot enter the desired sleep state (@prepare_late(), @enter(),
* @wake(), and @finish() will not be called in that case).
*
* @prepare_late: Finish preparing the platform for entering the system sleep
* state indicated by @begin().
* @prepare_late is called before disabling nonboot CPUs and after
* device drivers' late suspend callbacks have been executed. It returns
* 0 on success or a negative error code otherwise, in which case the
* system cannot enter the desired sleep state (@enter() and @wake()).
*
* @enter: Enter the system sleep state indicated by @begin() or represented by
* the argument if @begin() is not implemented.

@ -69,19 +76,26 @@ typedef int __bitwise suspend_state_t;
* error code otherwise, in which case the system cannot enter the desired
* sleep state.
*
* @finish: Called when the system has just left a sleep state, right after
* the nonboot CPUs have been enabled and before devices are resumed (it is
* executed with IRQs enabled).
* @wake: Called when the system has just left a sleep state, right after
* the nonboot CPUs have been enabled and before device drivers' early
* resume callbacks are executed.
* This callback is optional, but should be implemented by the platforms
* that implement @prepare_late(). If implemented, it is always called
* after @enter(), even if @enter() fails.
*
* @finish: Finish wake-up of the platform.
* @finish is called right prior to calling device drivers' regular suspend
* callbacks.
* This callback is optional, but should be implemented by the platforms
* that implement @prepare(). If implemented, it is always called after
* @enter() (even if @enter() fails).
* @enter() and @wake(), if implemented, even if any of them fails.
*
* @end: Called by the PM core right after resuming devices, to indicate to
* the platform that the system has returned to the working state or
* the transition to the sleep state has been aborted.
* This callback is optional, but should be implemented by the platforms
* that implement @begin(), but platforms implementing @begin() should
* also provide a @end() which cleans up transitions aborted before
* that implement @begin(). Accordingly, platforms implementing @begin()
* should also provide a @end() which cleans up transitions aborted before
* @enter().
*
* @recover: Recover the platform from a suspend failure.

@ -93,7 +107,9 @@ struct platform_suspend_ops {
int (*valid)(suspend_state_t state);
int (*begin)(suspend_state_t state);
int (*prepare)(void);
int (*prepare_late)(void);
int (*enter)(suspend_state_t state);
void (*wake)(void);
void (*finish)(void);
void (*end)(void);
void (*recover)(void);
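Putting the updated kerneldoc above together, here is a sketch of how a platform might fill in the reworked structure; the my_* callbacks are hypothetical placeholders, and the call order on a successful cycle is roughly begin, prepare, (late suspend of devices), prepare_late, enter, wake, (early resume of devices), finish, end.

#include <linux/suspend.h>

static int my_begin(suspend_state_t state)      { return 0; }
static int my_prepare(void)                     { return 0; }
static int my_prepare_late(void)                { return 0; }
static int my_enter(suspend_state_t state)      { return 0; }
static void my_wake(void)                       { }
static void my_finish(void)                     { }
static void my_end(void)                        { }

static struct platform_suspend_ops my_suspend_ops = {
        .valid          = suspend_valid_only_mem,
        .begin          = my_begin,
        .prepare        = my_prepare,
        .prepare_late   = my_prepare_late,
        .enter          = my_enter,
        .wake           = my_wake,
        .finish         = my_finish,
        .end            = my_end,
};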
@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state)

device_pm_lock();

if (suspend_ops->prepare) {
error = suspend_ops->prepare();
if (error)
goto Done;
}

error = device_power_down(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down\n");
goto Done;
goto Platfrom_finish;
}

if (suspend_ops->prepare) {
error = suspend_ops->prepare();
if (suspend_ops->prepare_late) {
error = suspend_ops->prepare_late();
if (error)
goto Power_up_devices;
}

if (suspend_test(TEST_PLATFORM))
goto Platfrom_finish;
goto Platform_wake;

error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))

@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state)
Enable_cpus:
enable_nonboot_cpus();

Platfrom_finish:
if (suspend_ops->finish)
suspend_ops->finish();
Platform_wake:
if (suspend_ops->wake)
suspend_ops->wake();

Power_up_devices:
device_power_up(PMSG_RESUME);

Platfrom_finish:
if (suspend_ops->finish)
suspend_ops->finish();

Done:
device_pm_unlock();
@ -218,6 +218,9 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
const char *old_name = kobj->name;
char *s;

if (kobj->name && !fmt)
return 0;

kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
if (!kobj->name)
return -ENOMEM;
@ -27,6 +27,9 @@ ccflags-y :=
cppflags-y :=
ldflags-y :=

subdir-asflags-y :=
subdir-ccflags-y :=

# Read auto.conf if it exists, otherwise ignore
-include include/config/auto.conf
@ -4,6 +4,11 @@ ccflags-y += $(EXTRA_CFLAGS)
cppflags-y += $(EXTRA_CPPFLAGS)
ldflags-y += $(EXTRA_LDFLAGS)

#
# flags that take effect in sub directories
export KBUILD_SUBDIR_ASFLAGS := $(KBUILD_SUBDIR_ASFLAGS) $(subdir-asflags-y)
export KBUILD_SUBDIR_CCFLAGS := $(KBUILD_SUBDIR_CCFLAGS) $(subdir-ccflags-y)

# Figure out what we need to build from the various variables
# ===========================================================================

@ -104,10 +109,10 @@ else
debug_flags =
endif

orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) \
orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
$(ccflags-y) $(CFLAGS_$(basetarget).o)
_c_flags = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
_a_flags = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) \
_a_flags = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(KBUILD_SUBDIR_ASFLAGS) \
$(asflags-y) $(AFLAGS_$(basetarget).o)
_cpp_flags = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(@F))
@ -490,7 +490,7 @@ void snd_emu10k1_wait(struct snd_emu10k1 *emu, unsigned int wait)
if (newtime != curtime)
break;
}
if (count >= 16384)
if (count > 16384)
break;
curtime = newtime;
}
@ -642,19 +642,21 @@ static int get_codec_name(struct hda_codec *codec)
*/
static void /*__devinit*/ setup_fg_nodes(struct hda_codec *codec)
{
int i, total_nodes;
int i, total_nodes, function_id;
hda_nid_t nid;

total_nodes = snd_hda_get_sub_nodes(codec, AC_NODE_ROOT, &nid);
for (i = 0; i < total_nodes; i++, nid++) {
codec->function_id = snd_hda_param_read(codec, nid,
function_id = snd_hda_param_read(codec, nid,
AC_PAR_FUNCTION_TYPE) & 0xff;
switch (codec->function_id) {
switch (function_id) {
case AC_GRP_AUDIO_FUNCTION:
codec->afg = nid;
codec->function_id = function_id;
break;
case AC_GRP_MODEM_FUNCTION:
codec->mfg = nid;
codec->function_id = function_id;
break;
default:
break;
@ -312,7 +312,6 @@ struct azx_dev {
unsigned int period_bytes; /* size of the period in bytes */
unsigned int frags; /* number for period in the play buffer */
unsigned int fifo_size; /* FIFO size */
unsigned int start_flag: 1; /* stream full start flag */
unsigned long start_jiffies; /* start + minimum jiffies */
unsigned long min_jiffies; /* minimum jiffies before position is valid */

@ -333,6 +332,7 @@ struct azx_dev {
unsigned int opened :1;
unsigned int running :1;
unsigned int irq_pending :1;
unsigned int start_flag: 1; /* stream full start flag */
/*
* For VIA:
* A flag to ensure DMA position is 0
@ -3977,6 +3977,14 @@ static int patch_ad1884a(struct hda_codec *codec)
spec->input_mux = &ad1884a_laptop_capture_source;
codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
codec->patch_ops.init = ad1884a_hp_init;
/* set the upper-limit for mixer amp to 0dB for avoiding the
* possible damage by overloading
*/
snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
(0x17 << AC_AMPCAP_OFFSET_SHIFT) |
(0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
(0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
(1 << AC_AMPCAP_MUTE_SHIFT));
break;
case AD1884A_MOBILE:
spec->mixers[0] = ad1884a_mobile_mixers;
@ -3076,6 +3076,11 @@ static int create_multi_out_ctls(struct hda_codec *codec, int num_outs,
unsigned int wid_caps;

for (i = 0; i < num_outs && i < ARRAY_SIZE(chname); i++) {
if (type == AUTO_PIN_HP_OUT && !spec->hp_detect) {
wid_caps = get_wcaps(codec, pins[i]);
if (wid_caps & AC_WCAP_UNSOL_CAP)
spec->hp_detect = 1;
}
nid = dac_nids[i];
if (!nid)
continue;

@ -3119,11 +3124,6 @@ static int create_multi_out_ctls(struct hda_codec *codec, int num_outs,
err = create_controls_idx(codec, name, idx, nid, 3);
if (err < 0)
return err;
if (type == AUTO_PIN_HP_OUT && !spec->hp_detect) {
wid_caps = get_wcaps(codec, pins[i]);
if (wid_caps & AC_WCAP_UNSOL_CAP)
spec->hp_detect = 1;
}
}
}
return 0;
@ -1852,6 +1852,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
.name = "Dell Unknown", /* STAC9750/51 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x016a,
.name = "Dell Inspiron 8600", /* STAC9750/51 */
.type = AC97_TUNE_HP_ONLY
},
{
.subvendor = 0x1028,
.subdevice = 0x0186,

@ -1894,12 +1900,6 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
.name = "HP nc6000",
.type = AC97_TUNE_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x0934,
.name = "HP nx8220",
.type = AC97_TUNE_MUTE_LED
},
{
.subvendor = 0x103c,
.subdevice = 0x129d,
@ -283,7 +283,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
break;
case SND_SOC_DAIFMT_DSP_B:
regs->srgr2 |= FPER(wlen * channels - 1);
regs->srgr1 |= FWID(wlen * channels - 2);
regs->srgr1 |= FWID(0);
break;
}

@ -302,6 +302,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
{
struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
unsigned int temp_fmt = fmt;

if (mcbsp_data->configured)
return 0;

@ -328,6 +329,8 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
/* 0-bit data delay */
regs->rcr2 |= RDATDLY(0);
regs->xcr2 |= XDATDLY(0);
/* Invert FS polarity configuration */
temp_fmt ^= SND_SOC_DAIFMT_NB_IF;
break;
default:
/* Unsupported data format */

@ -351,7 +354,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
}

/* Set bit clock (CLKX/CLKR) and FS polarities */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
switch (temp_fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
/*
* Normal BCLK + FS.
@ -62,7 +62,7 @@ static int osk_hw_params(struct snd_pcm_substream *substream,
/* Set codec DAI configuration */
err = snd_soc_dai_set_fmt(codec_dai,
SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_NB_IF |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (err < 0) {
printk(KERN_ERR "can't set codec DAI configuration\n");

@ -72,7 +72,7 @@ static int osk_hw_params(struct snd_pcm_substream *substream,
/* Set cpu DAI configuration */
err = snd_soc_dai_set_fmt(cpu_dai,
SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_NB_IF |
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (err < 0) {
printk(KERN_ERR "can't set cpu DAI configuration\n");
@ -806,6 +806,7 @@ static int pxa_ssp_probe(struct platform_device *pdev,
goto err_priv;
}

priv->dai_fmt = (unsigned int) -1;
dai->private_data = priv;

return 0;
@ -69,8 +69,8 @@ static int jive_hw_params(struct snd_pcm_substream *substream,
break;
}

s3c_i2sv2_calc_rate(&div, NULL, params_rate(params),
s3c2412_get_iisclk());
s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
s3c2412_get_iisclk());

/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |

@ -145,8 +145,9 @@ static struct snd_soc_dai_link jive_dai = {
};

/* jive audio machine driver */
static struct snd_soc_machine snd_soc_machine_jive = {
static struct snd_soc_card snd_soc_machine_jive = {
.name = "Jive",
.platform = &s3c24xx_soc_platform,
.dai_link = &jive_dai,
.num_links = 1,
};

@ -157,9 +158,8 @@ static struct wm8750_setup_data jive_wm8750_setup = {

/* jive audio subsystem */
static struct snd_soc_device jive_snd_devdata = {
.machine = &snd_soc_machine_jive,
.platform = &s3c24xx_soc_platform,
.codec_dev = &soc_codec_dev_wm8750_spi,
.card = &snd_soc_machine_jive,
.codec_dev = &soc_codec_dev_wm8750,
.codec_data = &jive_wm8750_setup,
};
@ -473,9 +473,9 @@ static int s3c2412_i2s_set_clkdiv(struct snd_soc_dai *cpu_dai,
/* default table of all avaialable root fs divisors */
static unsigned int iis_fs_tab[] = { 256, 512, 384, 768 };

int s3c2412_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
unsigned int *fstab,
unsigned int rate, struct clk *clk)
int s3c_i2sv2_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,
unsigned int *fstab,
unsigned int rate, struct clk *clk)
{
unsigned long clkrate = clk_get_rate(clk);
unsigned int div;

@ -531,7 +531,7 @@ int s3c2412_iis_calc_rate(struct s3c_i2sv2_rate_calc *info,

return 0;
}
EXPORT_SYMBOL_GPL(s3c2412_iis_calc_rate);
EXPORT_SYMBOL_GPL(s3c_i2sv2_iis_calc_rate);

int s3c_i2sv2_probe(struct platform_device *pdev,
struct snd_soc_dai *dai,

@ -624,10 +624,12 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)

int s3c_i2sv2_register_dai(struct snd_soc_dai *dai)
{
dai->ops.trigger = s3c2412_i2s_trigger;
dai->ops.hw_params = s3c2412_i2s_hw_params;
dai->ops.set_fmt = s3c2412_i2s_set_fmt;
dai->ops.set_clkdiv = s3c2412_i2s_set_clkdiv;
struct snd_soc_dai_ops *ops = dai->ops;

ops->trigger = s3c2412_i2s_trigger;
ops->hw_params = s3c2412_i2s_hw_params;
ops->set_fmt = s3c2412_i2s_set_fmt;
ops->set_clkdiv = s3c2412_i2s_set_clkdiv;

dai->suspend = s3c2412_i2s_suspend;
dai->resume = s3c2412_i2s_resume;

@ -33,8 +33,8 @@

#include <plat/regs-s3c2412-iis.h>

#include <plat/regs-gpio.h>
#include <plat/audio.h>
#include <mach/regs-gpio.h>
#include <mach/dma.h>

#include "s3c24xx-pcm.h"