Merge branch 'linus' into irq/threaded

Conflicts:
	include/linux/irq.h
	kernel/irq/handle.c
Ingo Molnar, 2009-04-06 01:41:22 +02:00
7471 changed files with 862421 additions and 312245 deletions


@@ -103,8 +103,9 @@
#define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER)
#define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER)
#define AE_MISSING_ARGUMENTS (acpi_status) (0x0008 | AE_CODE_PROGRAMMER)
#define AE_BAD_ADDRESS (acpi_status) (0x0009 | AE_CODE_PROGRAMMER)
#define AE_CODE_PGM_MAX 0x0008
#define AE_CODE_PGM_MAX 0x0009
/*
* Acpi table exceptions
@@ -224,7 +225,8 @@ char const *acpi_gbl_exception_names_pgm[] = {
"AE_BAD_HEX_CONSTANT",
"AE_BAD_OCTAL_CONSTANT",
"AE_BAD_DECIMAL_CONSTANT",
"AE_MISSING_ARGUMENTS"
"AE_MISSING_ARGUMENTS",
"AE_BAD_ADDRESS"
};
char const *acpi_gbl_exception_names_tbl[] = {


@@ -88,44 +88,30 @@ struct acpi_device;
typedef int (*acpi_op_add) (struct acpi_device * device);
typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
typedef int (*acpi_op_lock) (struct acpi_device * device, int type);
typedef int (*acpi_op_start) (struct acpi_device * device);
typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
typedef int (*acpi_op_suspend) (struct acpi_device * device,
pm_message_t state);
typedef int (*acpi_op_resume) (struct acpi_device * device);
typedef int (*acpi_op_scan) (struct acpi_device * device);
typedef int (*acpi_op_bind) (struct acpi_device * device);
typedef int (*acpi_op_unbind) (struct acpi_device * device);
typedef int (*acpi_op_shutdown) (struct acpi_device * device);
typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);
struct acpi_bus_ops {
u32 acpi_op_add:1;
u32 acpi_op_remove:1;
u32 acpi_op_lock:1;
u32 acpi_op_start:1;
u32 acpi_op_stop:1;
u32 acpi_op_suspend:1;
u32 acpi_op_resume:1;
u32 acpi_op_scan:1;
u32 acpi_op_bind:1;
u32 acpi_op_unbind:1;
u32 acpi_op_shutdown:1;
u32 reserved:21;
};
struct acpi_device_ops {
acpi_op_add add;
acpi_op_remove remove;
acpi_op_lock lock;
acpi_op_start start;
acpi_op_stop stop;
acpi_op_suspend suspend;
acpi_op_resume resume;
acpi_op_scan scan;
acpi_op_bind bind;
acpi_op_unbind unbind;
acpi_op_shutdown shutdown;
acpi_op_notify notify;
};
struct acpi_driver {


@@ -67,6 +67,16 @@
#define ACPI_BAY_HID "LNXIOBAY"
#define ACPI_DOCK_HID "LNXDOCK"
/*
* For fixed hardware buttons, we fabricate acpi_devices with HID
* ACPI_BUTTON_HID_POWERF or ACPI_BUTTON_HID_SLEEPF. Fixed hardware
* signals only an event; it doesn't supply a notification value.
* To allow drivers to treat notifications from fixed hardware the
* same as those from real devices, we turn the events into this
* notification value.
*/
#define ACPI_FIXED_HARDWARE_EVENT 0x100
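As a driver-side illustration (not part of the patch; the handler name and the 0x80 device notify value are examples only, and the usual ACPI bus headers are assumed), an acpi_op_notify handler can treat the fabricated fixed-hardware event exactly like a Notify() from a real device:

static void example_button_notify(struct acpi_device *device, u32 event)
{
	switch (event) {
	case ACPI_FIXED_HARDWARE_EVENT:	/* fabricated for fixed-feature buttons */
	case 0x80:			/* Notify(BTN, 0x80) from a real device */
		/* both paths report the same button press */
		break;
	default:
		break;
	}
}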
/* --------------------------------------------------------------------------
PCI
-------------------------------------------------------------------------- */
@@ -98,24 +108,6 @@ int acpi_pci_bind_root(struct acpi_device *device, struct acpi_pci_id *id,
struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
int bus);
/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state);
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
int acpi_power_get_inferred_state(struct acpi_device *device);
int acpi_power_transition(struct acpi_device *device, int state);
extern int acpi_power_nocheck;
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
int acpi_ec_ecdt_probe(void);
int acpi_boot_ec_enable(void);
/* --------------------------------------------------------------------------
Processor
-------------------------------------------------------------------------- */
@@ -165,9 +157,4 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
}
#endif
/*--------------------------------------------------------------------------
Suspend/Resume
-------------------------------------------------------------------------- */
extern int acpi_sleep_init(void);
#endif /*__ACPI_DRIVERS_H__*/


@@ -242,10 +242,6 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
acpi_status acpi_os_validate_interface(char *interface);
acpi_status acpi_osi_invalidate(char* interface);
acpi_status
acpi_os_validate_address(u8 space_id, acpi_physical_address address,
acpi_size length, char *name);
u64 acpi_os_get_timer(void);
acpi_status acpi_os_signal(u32 function, void *info);


@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
#define ACPI_CA_VERSION 0x20081204
#define ACPI_CA_VERSION 0x20090320
#include "actypes.h"
#include "actbl.h"
@@ -349,17 +349,15 @@ acpi_resource_to_address64(struct acpi_resource *resource,
*/
acpi_status acpi_reset(void);
acpi_status acpi_get_register(u32 register_id, u32 * return_value);
acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value);
acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value);
acpi_status acpi_write_bit_register(u32 register_id, u32 value);
acpi_status acpi_set_register(u32 register_id, u32 value);
acpi_status acpi_set_firmware_waking_vector(u32 physical_address);
acpi_status
acpi_set_firmware_waking_vector(u32 physical_address);
acpi_status
acpi_set_firmware_waking_vector64(u64 physical_address);
#if ACPI_MACHINE_WIDTH == 64
acpi_status acpi_set_firmware_waking_vector64(u64 physical_address);
#endif
acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg);


@@ -214,11 +214,11 @@ struct acpi_table_fadt {
u16 flush_size; /* Processor's memory cache line width, in bytes */
u16 flush_stride; /* Number of flush strides that need to be read */
u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */
u8 duty_width; /* Processor duty cycle value bit width in P_CNT register. */
u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */
u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
u8 century; /* Index to century in RTC CMOS RAM */
u16 boot_flags; /* IA-PC Boot Architecture Flags. See Table 5-10 for description */
u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */
u8 reserved; /* Reserved, must be zero */
u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
@@ -236,32 +236,41 @@ struct acpi_table_fadt {
struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
};
/* FADT Boot Architecture Flags (boot_flags) */
#define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */
#define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */
#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */
#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */
#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */
#define FADT2_REVISION_ID 3
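As a usage illustration (a hedged sketch, not from the patch), platform code typically tests these boot_flags bits against the global FADT copy, acpi_gbl_FADT; the helper name below is hypothetical:

static void example_check_boot_flags(void)
{
	if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042))
		printk(KERN_INFO "FADT: no 8042 controller reported\n");
	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI)
		printk(KERN_INFO "FADT: firmware asks that MSI stay disabled\n");
}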
/* FADT flags */
#define ACPI_FADT_WBINVD (1) /* 00: The wbinvd instruction works properly */
#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: The wbinvd flushes but does not invalidate */
#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: All processors support C1 state */
#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: C2 state works on MP system */
#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: Power button is handled as a generic feature */
#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: Sleep button is handled as a generic feature, or not present */
#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: RTC wakeup stat not in fixed register space */
#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: RTC wakeup possible from S4 */
#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: tmr_val is 32 bits 0=24-bits */
#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: Docking supported */
#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: System reset via the FADT RESET_REG supported */
#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: No internal expansion capabilities and case is sealed */
#define ACPI_FADT_HEADLESS (1<<12) /* 12: No local video capabilities or local input devices */
#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: Must execute native instruction after writing SLP_TYPx register */
#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: System is compatible with remote power on (ACPI 3.0) */
#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: All local APICs must use cluster model (ACPI 3.0) */
#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
#define ACPI_FADT_WBINVD (1) /* 00: [V1] The wbinvd instruction works properly */
#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] wbinvd flushes but does not invalidate caches */
#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: [V1] All processors support C1 state */
#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: [V1] C2 state works on MP system */
#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: [V1] Power button is handled as a control method device */
#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: [V1] Sleep button is handled as a control method device */
#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: [V1] RTC wakeup status not in fixed register space */
#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: [V1] RTC alarm can wake system from S4 */
#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: [V1] ACPI timer width is 32-bit (0=24-bit) */
#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: [V1] Docking supported */
#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: [V2] System reset via the FADT RESET_REG supported */
#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: [V3] No internal expansion capabilities and case is sealed */
#define ACPI_FADT_HEADLESS (1<<12) /* 12: [V3] No local video capabilities or local input devices */
#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: [V3] Must execute native instruction after writing SLP_TYPx register */
#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: [V4] System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: [V4] OSPM should use platform-provided timer (ACPI 3.0) */
#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
/* FADT Prefered Power Management Profiles */
/*
* FADT Prefered Power Management Profiles
*/
enum acpi_prefered_pm_profiles {
PM_UNSPECIFIED = 0,
PM_DESKTOP = 1,
@@ -272,16 +281,6 @@ enum acpi_prefered_pm_profiles {
PM_APPLIANCE_PC = 6
};
/* FADT Boot Arch Flags */
#define BAF_LEGACY_DEVICES 0x0001
#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
#define BAF_MSI_NOT_SUPPORTED 0x0008
#define BAF_PCIE_ASPM_CONTROL 0x0010
#define FADT2_REVISION_ID 3
#define FADT2_MINUS_REVISION_ID 2
/* Reset to default packing */
#pragma pack()
@@ -310,8 +309,9 @@ struct acpi_table_desc {
#define ACPI_TABLE_ORIGIN_UNKNOWN (0)
#define ACPI_TABLE_ORIGIN_MAPPED (1)
#define ACPI_TABLE_ORIGIN_ALLOCATED (2)
#define ACPI_TABLE_ORIGIN_MASK (3)
#define ACPI_TABLE_IS_LOADED (4)
#define ACPI_TABLE_ORIGIN_OVERRIDE (4)
#define ACPI_TABLE_ORIGIN_MASK (7)
#define ACPI_TABLE_IS_LOADED (8)
/*
* Get the remaining ACPI tables


@@ -1016,9 +1016,9 @@ struct acpi_madt_interrupt_source {
struct acpi_madt_local_x2apic {
struct acpi_subtable_header header;
u16 reserved; /* Reserved - must be zero */
u32 local_apic_id; /* Processor X2_APIC ID */
u32 local_apic_id; /* Processor x2APIC ID */
u32 lapic_flags;
u32 uid; /* Extended X2_APIC processor ID */
u32 uid; /* ACPI processor UID */
};
/* 10: Local X2APIC NMI (07/2008) */
@@ -1026,7 +1026,7 @@ struct acpi_madt_local_x2apic {
struct acpi_madt_local_x2apic_nmi {
struct acpi_subtable_header header;
u16 inti_flags;
u32 uid; /* Processor X2_APIC ID */
u32 uid; /* ACPI processor UID */
u8 lint; /* LINTn to which NMI is connected */
u8 reserved[3];
};


@@ -777,17 +777,25 @@ typedef u8 acpi_adr_space_type;
#define ACPI_BITREG_SCI_ENABLE 0x0E
#define ACPI_BITREG_BUS_MASTER_RLD 0x0F
#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10
#define ACPI_BITREG_SLEEP_TYPE_A 0x11
#define ACPI_BITREG_SLEEP_TYPE_B 0x12
#define ACPI_BITREG_SLEEP_ENABLE 0x13
#define ACPI_BITREG_SLEEP_TYPE 0x11
#define ACPI_BITREG_SLEEP_ENABLE 0x12
/* PM2 Control register */
#define ACPI_BITREG_ARB_DISABLE 0x14
#define ACPI_BITREG_ARB_DISABLE 0x13
#define ACPI_BITREG_MAX 0x14
#define ACPI_BITREG_MAX 0x13
#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1
/* Status register values. A 1 clears a status bit. 0 = no effect */
#define ACPI_CLEAR_STATUS 1
/* Enable and Control register values */
#define ACPI_ENABLE_EVENT 1
#define ACPI_DISABLE_EVENT 0
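To illustrate the renumbered bit-register interface (a sketch under assumptions: ACPI_BITREG_POWER_BUTTON_STATUS is defined earlier in this header, and the acpi_write_bit_register() declaration from acpixf.h is in scope), a caller passes one of the ACPI_BITREG_* IDs together with these values:

static acpi_status example_clear_power_button_status(void)
{
	/* Writing ACPI_CLEAR_STATUS (1) to a status bit clears it;
	 * writing 0 has no effect. */
	return acpi_write_bit_register(ACPI_BITREG_POWER_BUTTON_STATUS,
				       ACPI_CLEAR_STATUS);
}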
/*
* External ACPI object definition
*/


@@ -322,7 +322,7 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
int acpi_processor_get_throttling_info(struct acpi_processor *pr);
extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
extern struct file_operations acpi_processor_throttling_fops;
extern const struct file_operations acpi_processor_throttling_fops;
extern void acpi_processor_throttling_init(void);
/* in processor_idle.c */
int acpi_processor_power_init(struct acpi_processor *pr,
@@ -336,7 +336,7 @@ extern struct cpuidle_driver acpi_idle_driver;
/* in processor_thermal.c */
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern struct file_operations acpi_processor_limit_fops;
extern const struct file_operations acpi_processor_limit_fops;
extern struct thermal_cooling_device_ops processor_cooling_ops;
#ifdef CONFIG_CPU_FREQ
void acpi_thermal_cpufreq_init(void);

include/acpi/video.h (new file, 11 lines)

@@ -0,0 +1,11 @@
#ifndef __ACPI_VIDEO_H
#define __ACPI_VIDEO_H
#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
extern int acpi_video_register(void);
#else
static inline int acpi_video_register(void) { return 0; }
#endif
#endif


@@ -1,77 +0,0 @@
/* arch/arm/mach-s3c2410/include/mach/regs-iis.h
*
* Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
* http://www.simtec.co.uk/products/SWLINUX/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* S3C2410 IIS register definition
*/
#ifndef __ASM_ARCH_REGS_IIS_H
#define __ASM_ARCH_REGS_IIS_H
#define S3C2410_IISCON (0x00)
#define S3C2410_IISCON_LRINDEX (1<<8)
#define S3C2410_IISCON_TXFIFORDY (1<<7)
#define S3C2410_IISCON_RXFIFORDY (1<<6)
#define S3C2410_IISCON_TXDMAEN (1<<5)
#define S3C2410_IISCON_RXDMAEN (1<<4)
#define S3C2410_IISCON_TXIDLE (1<<3)
#define S3C2410_IISCON_RXIDLE (1<<2)
#define S3C2410_IISCON_PSCEN (1<<1)
#define S3C2410_IISCON_IISEN (1<<0)
#define S3C2410_IISMOD (0x04)
#define S3C2440_IISMOD_MPLL (1<<9)
#define S3C2410_IISMOD_SLAVE (1<<8)
#define S3C2410_IISMOD_NOXFER (0<<6)
#define S3C2410_IISMOD_RXMODE (1<<6)
#define S3C2410_IISMOD_TXMODE (2<<6)
#define S3C2410_IISMOD_TXRXMODE (3<<6)
#define S3C2410_IISMOD_LR_LLOW (0<<5)
#define S3C2410_IISMOD_LR_RLOW (1<<5)
#define S3C2410_IISMOD_IIS (0<<4)
#define S3C2410_IISMOD_MSB (1<<4)
#define S3C2410_IISMOD_8BIT (0<<3)
#define S3C2410_IISMOD_16BIT (1<<3)
#define S3C2410_IISMOD_BITMASK (1<<3)
#define S3C2410_IISMOD_256FS (0<<2)
#define S3C2410_IISMOD_384FS (1<<2)
#define S3C2410_IISMOD_16FS (0<<0)
#define S3C2410_IISMOD_32FS (1<<0)
#define S3C2410_IISMOD_48FS (2<<0)
#define S3C2410_IISMOD_FS_MASK (3<<0)
#define S3C2410_IISPSR (0x08)
#define S3C2410_IISPSR_INTMASK (31<<5)
#define S3C2410_IISPSR_INTSHIFT (5)
#define S3C2410_IISPSR_EXTMASK (31<<0)
#define S3C2410_IISPSR_EXTSHFIT (0)
#define S3C2410_IISFCON (0x0c)
#define S3C2410_IISFCON_TXDMA (1<<15)
#define S3C2410_IISFCON_RXDMA (1<<14)
#define S3C2410_IISFCON_TXENABLE (1<<13)
#define S3C2410_IISFCON_RXENABLE (1<<12)
#define S3C2410_IISFCON_TXMASK (0x3f << 6)
#define S3C2410_IISFCON_TXSHIFT (6)
#define S3C2410_IISFCON_RXMASK (0x3f)
#define S3C2410_IISFCON_RXSHIFT (0)
#define S3C2400_IISFCON_TXDMA (1<<11)
#define S3C2400_IISFCON_RXDMA (1<<10)
#define S3C2400_IISFCON_TXENABLE (1<<9)
#define S3C2400_IISFCON_RXENABLE (1<<8)
#define S3C2400_IISFCON_TXMASK (0x07 << 4)
#define S3C2400_IISFCON_TXSHIFT (4)
#define S3C2400_IISFCON_RXMASK (0x07)
#define S3C2400_IISFCON_RXSHIFT (0)
#define S3C2410_IISFIFO (0x10)
#endif /* __ASM_ARCH_REGS_IIS_H */


@@ -1,72 +0,0 @@
/* linux/include/asm-arm/plat-s3c24xx/regs-s3c2412-iis.h
*
* Copyright 2007 Simtec Electronics <linux@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* S3C2412 IIS register definition
*/
#ifndef __ASM_ARCH_REGS_S3C2412_IIS_H
#define __ASM_ARCH_REGS_S3C2412_IIS_H
#define S3C2412_IISCON (0x00)
#define S3C2412_IISMOD (0x04)
#define S3C2412_IISFIC (0x08)
#define S3C2412_IISPSR (0x0C)
#define S3C2412_IISTXD (0x10)
#define S3C2412_IISRXD (0x14)
#define S3C2412_IISCON_LRINDEX (1 << 11)
#define S3C2412_IISCON_TXFIFO_EMPTY (1 << 10)
#define S3C2412_IISCON_RXFIFO_EMPTY (1 << 9)
#define S3C2412_IISCON_TXFIFO_FULL (1 << 8)
#define S3C2412_IISCON_RXFIFO_FULL (1 << 7)
#define S3C2412_IISCON_TXDMA_PAUSE (1 << 6)
#define S3C2412_IISCON_RXDMA_PAUSE (1 << 5)
#define S3C2412_IISCON_TXCH_PAUSE (1 << 4)
#define S3C2412_IISCON_RXCH_PAUSE (1 << 3)
#define S3C2412_IISCON_TXDMA_ACTIVE (1 << 2)
#define S3C2412_IISCON_RXDMA_ACTIVE (1 << 1)
#define S3C2412_IISCON_IIS_ACTIVE (1 << 0)
#define S3C2412_IISMOD_MASTER_INTERNAL (0 << 10)
#define S3C2412_IISMOD_MASTER_EXTERNAL (1 << 10)
#define S3C2412_IISMOD_SLAVE (2 << 10)
#define S3C2412_IISMOD_MASTER_MASK (3 << 10)
#define S3C2412_IISMOD_MODE_TXONLY (0 << 8)
#define S3C2412_IISMOD_MODE_RXONLY (1 << 8)
#define S3C2412_IISMOD_MODE_TXRX (2 << 8)
#define S3C2412_IISMOD_MODE_MASK (3 << 8)
#define S3C2412_IISMOD_LR_LLOW (0 << 7)
#define S3C2412_IISMOD_LR_RLOW (1 << 7)
#define S3C2412_IISMOD_SDF_IIS (0 << 5)
#define S3C2412_IISMOD_SDF_MSB (0 << 5)
#define S3C2412_IISMOD_SDF_LSB (0 << 5)
#define S3C2412_IISMOD_SDF_MASK (3 << 5)
#define S3C2412_IISMOD_RCLK_256FS (0 << 3)
#define S3C2412_IISMOD_RCLK_512FS (1 << 3)
#define S3C2412_IISMOD_RCLK_384FS (2 << 3)
#define S3C2412_IISMOD_RCLK_768FS (3 << 3)
#define S3C2412_IISMOD_RCLK_MASK (3 << 3)
#define S3C2412_IISMOD_BCLK_32FS (0 << 1)
#define S3C2412_IISMOD_BCLK_48FS (1 << 1)
#define S3C2412_IISMOD_BCLK_16FS (2 << 1)
#define S3C2412_IISMOD_BCLK_24FS (3 << 1)
#define S3C2412_IISMOD_BCLK_MASK (3 << 1)
#define S3C2412_IISMOD_8BIT (1 << 0)
#define S3C2412_IISPSR_PSREN (1 << 15)
#define S3C2412_IISFIC_TXFLUSH (1 << 15)
#define S3C2412_IISFIC_RXFLUSH (1 << 7)
#define S3C2412_IISFIC_TXCOUNT(x) (((x) >> 8) & 0xf)
#define S3C2412_IISFIC_RXCOUNT(x) (((x) >> 0) & 0xf)
#endif /* __ASM_ARCH_REGS_S3C2412_IIS_H */

include/asm-frv/ftrace.h (new file, 1 line)

@@ -0,0 +1 @@
/* empty */


@@ -18,6 +18,7 @@
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/mem-layout.h>
#include <asm/spr-regs.h>
#include <asm/mb-regs.h>
@@ -116,6 +117,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
unsigned long paddr;
pagefault_disable();
debug_kmap_atomic(type);
paddr = page_to_phys(page);
switch (type) {


@@ -1,24 +0,0 @@
/* ide.h: FRV IDE declarations
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _ASM_IDE_H
#define _ASM_IDE_H
#ifdef __KERNEL__
#include <asm/setup.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm-generic/ide_iops.h>
#endif /* __KERNEL__ */
#endif /* _ASM_IDE_H */


@@ -54,5 +54,8 @@
#define SO_MARK 36
#define SO_TIMESTAMPING 37
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif /* _ASM_SOCKET_H */


@@ -1,308 +0,0 @@
/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
*
* Implements the generic device dma API via the existing pci_ one
* for unconverted architectures
*/
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H
#ifdef CONFIG_PCI
/* we implement the API below in terms of the existing PCI one,
* so include it */
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>
static inline int
dma_supported(struct device *dev, u64 mask)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_dma_supported(to_pci_dev(dev), mask);
}
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
size, (int)direction);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
size, (int)direction);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
}
#else
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 0;
}
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
BUG();
return 0;
}
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flag)
{
BUG();
return NULL;
}
static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
BUG();
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
BUG();
return 0;
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction direction)
{
BUG();
}
static inline int
dma_error(dma_addr_t dma_addr)
{
return 0;
}
#endif
/* Now for the API extensions over the pci_ one */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
static inline int
dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all processors, so return
* the maximum possible, to be safe */
return (1 << INTERNODE_CACHE_SHIFT);
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything, that's all the pci API can do */
dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
/* just sync everything, that's all the pci API can do */
dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
/* could define this in terms of the dma_cache ... operations,
* but if you get this on a platform, you should convert the platform
* to using the generic device DMA API */
BUG();
}
#endif


@@ -117,9 +117,9 @@
struct flock {
short l_type;
short l_whence;
off_t l_start;
off_t l_len;
pid_t l_pid;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
__ARCH_FLOCK_PAD
};
#endif
@@ -140,9 +140,9 @@ struct flock {
struct flock64 {
short l_type;
short l_whence;
loff_t l_start;
loff_t l_len;
pid_t l_pid;
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
__ARCH_FLOCK64_PAD
};
#endif


@@ -55,6 +55,10 @@ struct module;
* handled is (base + ngpio - 1).
* @can_sleep: flag must be set iff get()/set() methods sleep, as they
* must while accessing GPIO expander chips over I2C or SPI
* @names: if set, must be an array of strings to use as alternative
* names for the GPIOs in this chip. Any entry in the array
* may be NULL if there is no alias for the GPIO, however the
* array must be @ngpio entries long.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programing interface.
@@ -92,6 +96,7 @@ struct gpio_chip {
struct gpio_chip *chip);
int base;
u16 ngpio;
char **names;
unsigned can_sleep:1;
unsigned exported:1;
};
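A hypothetical board-code sketch of the new names field (identifiers are made up; <linux/gpio.h> and <linux/kernel.h> for ARRAY_SIZE are assumed): the array must have exactly ngpio entries, and NULL entries are allowed for lines without an alias.

static char *example_gpio_names[] = {
	"power_led",
	NULL,			/* line 1 has no alias */
	"sd_card_detect",
	"wlan_reset",
};

static struct gpio_chip example_gpio_chip = {
	.label	= "example-gpio",
	.base	= 160,
	.ngpio	= ARRAY_SIZE(example_gpio_names),
	.names	= example_gpio_names,
};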


@@ -23,7 +23,7 @@ typedef union sigval {
#endif
#ifndef __ARCH_SI_UID_T
#define __ARCH_SI_UID_T uid_t
#define __ARCH_SI_UID_T __kernel_uid32_t
#endif
/*
@@ -47,13 +47,13 @@ typedef struct siginfo {
/* kill() */
struct {
pid_t _pid; /* sender's pid */
__kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
timer_t _tid; /* timer id */
__kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
@@ -62,18 +62,18 @@ typedef struct siginfo {
/* POSIX.1b signals */
struct {
pid_t _pid; /* sender's pid */
__kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
__kernel_pid_t _pid; /* which child */
__ARCH_SI_UID_T _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
__kernel_clock_t _utime;
__kernel_clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */


@@ -1,8 +1,9 @@
#ifndef _GENERIC_STATFS_H
#define _GENERIC_STATFS_H
#ifndef __KERNEL_STRICT_NAMES
# include <linux/types.h>
#include <linux/types.h>
#ifdef __KERNEL__
typedef __kernel_fsid_t fsid_t;
#endif


@@ -43,20 +43,10 @@
#ifndef cpumask_of_node
#define cpumask_of_node(node) ((void)node, cpu_online_mask)
#endif
#ifndef node_to_first_cpu
#define node_to_first_cpu(node) ((void)(node),0)
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
#endif
#ifndef pcibus_to_cpumask
#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
CPU_MASK_ALL : \
node_to_cpumask(pcibus_to_node(bus)) \
)
#endif
#ifndef cpumask_of_pcibus
#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
cpu_all_mask : \


@@ -61,6 +61,30 @@
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_EVENT_TRACER
#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
*(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
*(__syscalls_metadata) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif
/* .data section */
#define DATA_DATA \
*(.data) \
@@ -80,8 +104,16 @@
VMLINUX_SYMBOL(__start___tracepoints) = .; \
*(__tracepoints) \
VMLINUX_SYMBOL(__stop___tracepoints) = .; \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
*(__verbose) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE()
BRANCH_PROFILE() \
TRACE_PRINTKS() \
FTRACE_EVENTS() \
TRACE_SYSCALLS()
#define RO_DATA(align) \
. = ALIGN((align)); \
@@ -309,15 +341,7 @@
CPU_DISCARD(init.data) \
CPU_DISCARD(init.rodata) \
MEM_DISCARD(init.data) \
MEM_DISCARD(init.rodata) \
/* implement dynamic printk debug */ \
VMLINUX_SYMBOL(__start___verbose_strings) = .; \
*(__verbose_strings) \
VMLINUX_SYMBOL(__stop___verbose_strings) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
*(__verbose) \
VMLINUX_SYMBOL(__stop___verbose) = .;
MEM_DISCARD(init.rodata)
#define INIT_TEXT \
*(.init.text) \


@@ -0,0 +1 @@
/* empty */


@@ -54,4 +54,7 @@
#define SO_MARK 36
#define SO_TIMESTAMPING 37
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif /* _ASM_M32R_SOCKET_H */


@@ -316,6 +316,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
return 0;
}
#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()


@@ -0,0 +1 @@
/* empty */


@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>
@@ -77,6 +78,7 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
if (page < highmem_start_page)
return page_address(page);
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG


@@ -1,39 +0,0 @@
/* MN10300 Arch-specific IDE code
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from include/asm-i386/ide.h
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_IDE_H
#define _ASM_IDE_H
#ifdef __KERNEL__
#include <asm/intctl-regs.h>
#undef SUPPORT_SLOW_DATA_PORTS
#define SUPPORT_SLOW_DATA_PORTS 0
#undef SUPPORT_VLB_SYNC
#define SUPPORT_VLB_SYNC 0
/*
* some bits needed for parts of the IDE subsystem to compile
*/
#define __ide_mm_insw(port, addr, n) \
insw((unsigned long) (port), (addr), (n))
#define __ide_mm_insl(port, addr, n) \
insl((unsigned long) (port), (addr), (n))
#define __ide_mm_outsw(port, addr, n) \
outsw((unsigned long) (port), (addr), (n))
#define __ide_mm_outsl(port, addr, n) \
outsl((unsigned long) (port), (addr), (n))
#endif /* __KERNEL__ */
#endif /* _ASM_IDE_H */


@@ -121,4 +121,9 @@ pcibios_select_root(struct pci_dev *pdev, struct resource *res)
#define pcibios_scan_all_fns(a, b) 0
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
return channel ? 15 : 14;
}
#endif /* _ASM_PCI_H */


@@ -54,4 +54,7 @@
#define SO_MARK 36
#define SO_TIMESTAMPING 37
#define SCM_TIMESTAMPING SO_TIMESTAMPING
#endif /* _ASM_SOCKET_H */


@@ -17,10 +17,14 @@
#define AES_MAX_KEYLENGTH (15 * 16)
#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32))
/*
* Please ensure that the first two fields are 16-byte aligned
* relative to the start of the structure, i.e., don't move them!
*/
struct crypto_aes_ctx {
u32 key_length;
u32 key_enc[AES_MAX_KEYLENGTH_U32];
u32 key_dec[AES_MAX_KEYLENGTH_U32];
u32 key_length;
};
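The layout rule from the comment can be checked at compile time; the helper below is only an illustration (BUILD_BUG_ON from <linux/kernel.h>, offsetof from <linux/stddef.h>) of why key_length was moved to the end.

static inline void crypto_aes_ctx_layout_check(void)
{
	/* key_enc and key_dec must start on 16-byte boundaries
	 * relative to the start of the structure. */
	BUILD_BUG_ON(offsetof(struct crypto_aes_ctx, key_enc) & 15);
	BUILD_BUG_ON(offsetof(struct crypto_aes_ctx, key_dec) & 15);
}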
extern const u32 crypto_ft_tab[4][256];

include/crypto/compress.h (new file, 145 lines)

@@ -0,0 +1,145 @@
/*
* Compress: Compression algorithms under the cryptographic API.
*
* Copyright 2008 Sony Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _CRYPTO_COMPRESS_H
#define _CRYPTO_COMPRESS_H
#include <linux/crypto.h>
struct comp_request {
const void *next_in; /* next input byte */
void *next_out; /* next output byte */
unsigned int avail_in; /* bytes available at next_in */
unsigned int avail_out; /* bytes available at next_out */
};
enum zlib_comp_params {
ZLIB_COMP_LEVEL = 1, /* e.g. Z_DEFAULT_COMPRESSION */
ZLIB_COMP_METHOD, /* e.g. Z_DEFLATED */
ZLIB_COMP_WINDOWBITS, /* e.g. MAX_WBITS */
ZLIB_COMP_MEMLEVEL, /* e.g. DEF_MEM_LEVEL */
ZLIB_COMP_STRATEGY, /* e.g. Z_DEFAULT_STRATEGY */
__ZLIB_COMP_MAX,
};
#define ZLIB_COMP_MAX (__ZLIB_COMP_MAX - 1)
enum zlib_decomp_params {
ZLIB_DECOMP_WINDOWBITS = 1, /* e.g. DEF_WBITS */
__ZLIB_DECOMP_MAX,
};
#define ZLIB_DECOMP_MAX (__ZLIB_DECOMP_MAX - 1)
struct crypto_pcomp {
struct crypto_tfm base;
};
struct pcomp_alg {
int (*compress_setup)(struct crypto_pcomp *tfm, void *params,
unsigned int len);
int (*compress_init)(struct crypto_pcomp *tfm);
int (*compress_update)(struct crypto_pcomp *tfm,
struct comp_request *req);
int (*compress_final)(struct crypto_pcomp *tfm,
struct comp_request *req);
int (*decompress_setup)(struct crypto_pcomp *tfm, void *params,
unsigned int len);
int (*decompress_init)(struct crypto_pcomp *tfm);
int (*decompress_update)(struct crypto_pcomp *tfm,
struct comp_request *req);
int (*decompress_final)(struct crypto_pcomp *tfm,
struct comp_request *req);
struct crypto_alg base;
};
extern struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
u32 mask);
static inline struct crypto_tfm *crypto_pcomp_tfm(struct crypto_pcomp *tfm)
{
return &tfm->base;
}
static inline void crypto_free_pcomp(struct crypto_pcomp *tfm)
{
crypto_destroy_tfm(tfm, crypto_pcomp_tfm(tfm));
}
static inline struct pcomp_alg *__crypto_pcomp_alg(struct crypto_alg *alg)
{
return container_of(alg, struct pcomp_alg, base);
}
static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
{
return __crypto_pcomp_alg(crypto_pcomp_tfm(tfm)->__crt_alg);
}
static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
void *params, unsigned int len)
{
return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
}
static inline int crypto_compress_init(struct crypto_pcomp *tfm)
{
return crypto_pcomp_alg(tfm)->compress_init(tfm);
}
static inline int crypto_compress_update(struct crypto_pcomp *tfm,
struct comp_request *req)
{
return crypto_pcomp_alg(tfm)->compress_update(tfm, req);
}
static inline int crypto_compress_final(struct crypto_pcomp *tfm,
struct comp_request *req)
{
return crypto_pcomp_alg(tfm)->compress_final(tfm, req);
}
static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
void *params, unsigned int len)
{
return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
}
static inline int crypto_decompress_init(struct crypto_pcomp *tfm)
{
return crypto_pcomp_alg(tfm)->decompress_init(tfm);
}
static inline int crypto_decompress_update(struct crypto_pcomp *tfm,
struct comp_request *req)
{
return crypto_pcomp_alg(tfm)->decompress_update(tfm, req);
}
static inline int crypto_decompress_final(struct crypto_pcomp *tfm,
struct comp_request *req)
{
return crypto_pcomp_alg(tfm)->decompress_final(tfm, req);
}
#endif /* _CRYPTO_COMPRESS_H */
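A minimal one-shot usage sketch of the new pcomp interface follows. It assumes a "zlib" pcomp implementation is registered and that an algorithm-specific parameter buffer (params/params_len, e.g. packed ZLIB_COMP_* attributes) has been prepared by the caller; the update/final return convention is also an assumption, so treat this as illustrative only.

#include <linux/err.h>
#include <crypto/compress.h>

static int example_pcomp_compress(const void *src, unsigned int slen,
				  void *dst, unsigned int dlen,
				  void *params, unsigned int params_len)
{
	struct crypto_pcomp *tfm;
	struct comp_request req;
	int err;

	tfm = crypto_alloc_pcomp("zlib", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_compress_setup(tfm, params, params_len);
	if (err)
		goto out;
	err = crypto_compress_init(tfm);
	if (err)
		goto out;

	req.next_in = src;
	req.avail_in = slen;
	req.next_out = dst;
	req.avail_out = dlen;

	/* Real users loop here, refilling next_in/next_out as the
	 * avail_* counters drop; a single pass is shown for brevity. */
	err = crypto_compress_update(tfm, &req);
	if (err >= 0)
		err = crypto_compress_final(tfm, &req);
out:
	crypto_free_pcomp(tfm);
	return err < 0 ? err : 0;
}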

include/crypto/cryptd.h (new file, 27 lines)

@@ -0,0 +1,27 @@
/*
* Software async crypto daemon
*/
#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H
#include <linux/crypto.h>
#include <linux/kernel.h>
struct cryptd_ablkcipher {
struct crypto_ablkcipher base;
};
static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
struct crypto_ablkcipher *tfm)
{
return (struct cryptd_ablkcipher *)tfm;
}
/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
u32 type, u32 mask);
struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
#endif
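As an illustration of the new helpers (a sketch; the algorithm name "cbc(aes)" and the error handling are assumptions, and <linux/err.h> is assumed to be included), a caller wraps a synchronous blkcipher so its requests are serviced from the cryptd workqueue:

static int example_wrap_in_cryptd(void)
{
	struct cryptd_ablkcipher *ctfm;
	struct crypto_blkcipher *child;

	ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* Asynchronous requests are queued through &ctfm->base; the
	 * underlying synchronous transform stays reachable for
	 * direct fast paths. */
	child = cryptd_ablkcipher_child(ctfm);
	(void)child;

	cryptd_free_ablkcipher(ctfm);
	return 0;
}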


@@ -0,0 +1,7 @@
#ifndef CRYPTO_WQ_H
#define CRYPTO_WQ_H
#include <linux/workqueue.h>
extern struct workqueue_struct *kcrypto_wq;
#endif


@@ -231,6 +231,11 @@ static inline unsigned int crypto_shash_alignmask(
return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
}
static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
{
return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
}
static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
{
return container_of(alg, struct shash_alg, base);


@@ -0,0 +1,28 @@
/*
* Compress: Compression algorithms under the cryptographic API.
*
* Copyright 2008 Sony Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
* If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _CRYPTO_INTERNAL_COMPRESS_H
#define _CRYPTO_INTERNAL_COMPRESS_H
#include <crypto/compress.h>
extern int crypto_register_pcomp(struct pcomp_alg *alg);
extern int crypto_unregister_pcomp(struct pcomp_alg *alg);
#endif /* _CRYPTO_INTERNAL_COMPRESS_H */


@@ -36,8 +36,7 @@
#ifndef _DRM_H_
#define _DRM_H_
#if defined(__KERNEL__)
#endif
#include <linux/types.h>
#include <asm/ioctl.h> /* For _IO* macros */
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_IOC_VOID _IOC_NONE
@@ -497,8 +496,8 @@ union drm_wait_vblank {
* \sa drmModesetCtl().
*/
struct drm_modeset_ctl {
uint32_t crtc;
uint32_t cmd;
__u32 crtc;
__u32 cmd;
};
/**
@@ -574,29 +573,29 @@ struct drm_set_version {
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
};
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
uint32_t handle;
__u32 handle;
/** Returned global name */
uint32_t name;
__u32 name;
};
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
uint32_t name;
__u32 name;
/** Returned handle for the object */
uint32_t handle;
__u32 handle;
/** Returned size of the object */
uint64_t size;
__u64 size;
};
#include "drm_mode.h"


@@ -281,16 +281,16 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
struct drm_ioctl_desc {
unsigned int cmd;
drm_ioctl_t *func;
int flags;
drm_ioctl_t *func;
};
/**
* Creates a driver or general drm_ioctl_desc array entry for the given
* ioctl, for use by drm_ioctl().
*/
#define DRM_IOCTL_DEF(ioctl, func, flags) \
[DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags}
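For example (the ioctl number and handler below are hypothetical), a driver's table entry built with the reworked macro reads:

static struct drm_ioctl_desc example_ioctls[] = {
	DRM_IOCTL_DEF(DRM_IOCTL_EXAMPLE, example_ioctl_handler, DRM_AUTH),
};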
struct drm_magic_entry {
struct list_head head;
@@ -522,20 +522,33 @@ struct drm_mm {
};
/**
* Kernel side of a mapping
*/
struct drm_local_map {
resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
enum drm_map_type type; /**< Type of memory to map */
enum drm_map_flags flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
};
typedef struct drm_local_map drm_local_map_t;
/**
* Mappings list
*/
struct drm_map_list {
struct list_head head; /**< list head */
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
struct drm_local_map *map; /**< mapping */
uint64_t user_token;
struct drm_master *master;
struct drm_mm_node *file_offset_node; /**< fake offset */
};
typedef struct drm_map drm_local_map_t;
/**
* Context handle list
*/
@@ -560,7 +573,7 @@ struct drm_ati_pcigart_info {
dma_addr_t bus_addr;
dma_addr_t table_mask;
struct drm_dma_handle *table_handle;
drm_local_map_t mapping;
struct drm_local_map mapping;
int table_size;
};
@@ -675,7 +688,6 @@ struct drm_driver {
int (*kernel_context_switch) (struct drm_device *dev, int old,
int new);
void (*kernel_context_switch_unlock) (struct drm_device *dev);
int (*dri_library_name) (struct drm_device *dev, char *buf);
/**
* get_vblank_counter - get raw hardware vblank counter
@@ -747,8 +759,8 @@ struct drm_driver {
struct drm_file *file_priv);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
struct drm_file *file_priv);
unsigned long (*get_map_ofs) (struct drm_map * map);
unsigned long (*get_reg_ofs) (struct drm_device *dev);
resource_size_t (*get_map_ofs) (struct drm_local_map * map);
resource_size_t (*get_reg_ofs) (struct drm_device *dev);
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
@@ -758,6 +770,8 @@ struct drm_driver {
int (*proc_init)(struct drm_minor *minor);
void (*proc_cleanup)(struct drm_minor *minor);
int (*debugfs_init)(struct drm_minor *minor);
void (*debugfs_cleanup)(struct drm_minor *minor);
/**
* Driver-specific constructor for drm_gem_objects, to set up
@@ -793,6 +807,48 @@ struct drm_driver {
#define DRM_MINOR_CONTROL 2
#define DRM_MINOR_RENDER 3
/**
* debugfs node list. This structure represents a debugfs file to
* be created by the drm core
*/
struct drm_debugfs_list {
const char *name; /** file name */
int (*show)(struct seq_file*, void*); /** show callback */
u32 driver_features; /**< Required driver features for this entry */
};
/**
* debugfs node structure. This structure represents a debugfs file.
*/
struct drm_debugfs_node {
struct list_head list;
struct drm_minor *minor;
struct drm_debugfs_list *debugfs_ent;
struct dentry *dent;
};
/**
* Info file list entry. This structure represents a debugfs or proc file to
* be created by the drm core
*/
struct drm_info_list {
const char *name; /** file name */
int (*show)(struct seq_file*, void*); /** show callback */
u32 driver_features; /**< Required driver features for this entry */
void *data;
};
/**
* debugfs node structure. This structure represents a debugfs file.
*/
struct drm_info_node {
struct list_head list;
struct drm_minor *minor;
struct drm_info_list *info_ent;
struct dentry *dent;
};
/**
* DRM minor structure. This structure represents a drm minor number.
*/
@@ -802,7 +858,12 @@ struct drm_minor {
dev_t device; /**< Device number for mknod */
struct device kdev; /**< Linux device */
struct drm_device *dev;
struct proc_dir_entry *dev_root; /**< proc directory entry */
struct proc_dir_entry *proc_root; /**< proc directory entry */
struct drm_info_node proc_nodes;
struct dentry *debugfs_root;
struct drm_info_node debugfs_nodes;
struct drm_master *master; /* currently active master for this node */
struct list_head master_list;
struct drm_mode_group mode_group;
@@ -932,7 +993,7 @@ struct drm_device {
sigset_t sigmask;
struct drm_driver *driver;
drm_local_map_t *agp_buffer_map;
struct drm_local_map *agp_buffer_map;
unsigned int agp_buffer_token;
struct drm_minor *control; /**< Control node for card */
struct drm_minor *primary; /**< render type primary screen head */
@@ -1049,8 +1110,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct vm_area_struct *vma);
extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
@@ -1153,13 +1214,13 @@ extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv
/* Buffer management support (drm_bufs.h) */
extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
extern int drm_addmap(struct drm_device *dev, unsigned int offset,
extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, drm_local_map_t ** map_ptr);
enum drm_map_flags flags, struct drm_local_map **map_ptr);
extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_addbufs(struct drm_device *dev, void *data,
@@ -1173,10 +1234,10 @@ extern int drm_freebufs(struct drm_device *dev, void *data,
extern int drm_mapbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_order(unsigned long size);
extern unsigned long drm_get_resource_start(struct drm_device *dev,
extern resource_size_t drm_get_resource_start(struct drm_device *dev,
unsigned int resource);
extern resource_size_t drm_get_resource_len(struct drm_device *dev,
unsigned int resource);
extern unsigned long drm_get_resource_len(struct drm_device *dev,
unsigned int resource);
/* DMA support (drm_dma.h) */
extern int drm_dma_setup(struct drm_device *dev);
@@ -1252,22 +1313,48 @@ extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
extern int drm_put_dev(struct drm_device *dev);
extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern unsigned int drm_debug;
extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern struct dentry *drm_debugfs_root;
extern struct idr drm_minors_idr;
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
/* Proc support (drm_proc.h) */
extern int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root);
extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root);
extern int drm_debugfs_create_files(struct drm_info_list *files, int count,
struct dentry *root, struct drm_minor *minor);
extern int drm_debugfs_remove_files(struct drm_info_list *files, int count,
struct drm_minor *minor);
extern int drm_debugfs_cleanup(struct drm_minor *minor);
#endif
/* Info file support */
extern int drm_name_info(struct seq_file *m, void *data);
extern int drm_vm_info(struct seq_file *m, void *data);
extern int drm_queues_info(struct seq_file *m, void *data);
extern int drm_bufs_info(struct seq_file *m, void *data);
extern int drm_vblank_info(struct seq_file *m, void *data);
extern int drm_clients_info(struct seq_file *m, void* data);
extern int drm_gem_name_info(struct seq_file *m, void *data);
extern int drm_gem_object_info(struct seq_file *m, void* data);
#if DRM_DEBUG_CODE
extern int drm_vma_info(struct seq_file *m, void *data);
#endif
/* Scatter Gather Support (drm_scatter.h) */
extern void drm_sg_cleanup(struct drm_sg_mem * entry);
extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
@@ -1378,12 +1465,12 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
{
struct drm_map_list *_entry;
list_for_each_entry(_entry, &dev->maplist, head)
@@ -1410,7 +1497,7 @@ static __inline__ int drm_device_is_pcie(struct drm_device *dev)
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
static __inline__ void drm_core_dropmap(struct drm_map *map)
static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
}
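A driver-side sketch of the new debugfs hooks declared above may help (names are hypothetical; <linux/seq_file.h> and ARRAY_SIZE are assumed): the driver fills a drm_info_list table and registers it from its ->debugfs_init() callback.

static int example_info_show(struct seq_file *m, void *data)
{
	seq_printf(m, "example debugfs node\n");
	return 0;
}

static struct drm_info_list example_debugfs_list[] = {
	{ "example_info", example_info_show, 0, NULL },
};

static int example_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(example_debugfs_list,
					ARRAY_SIZE(example_debugfs_list),
					minor->debugfs_root, minor);
}

static void example_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(example_debugfs_list,
				 ARRAY_SIZE(example_debugfs_list),
				 minor);
}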


@@ -550,7 +550,7 @@ struct drm_mode_config {
int min_width, min_height;
int max_width, max_height;
struct drm_mode_config_funcs *funcs;
unsigned long fb_base;
resource_size_t fb_base;
/* pointers to standard properties */
struct list_head property_blob_list;
@@ -613,7 +613,8 @@ extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter);
extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
unsigned char *buf, int len);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
@@ -731,4 +732,5 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
#endif /* __DRM_CRTC_H__ */


@@ -33,7 +33,6 @@
#ifndef __DRM_CRTC_HELPER_H__
#define __DRM_CRTC_HELPER_H__
#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/idr.h>
@@ -92,7 +91,7 @@ struct drm_connector_helper_funcs {
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
extern void drm_helper_disable_unused_functions(struct drm_device *dev);
extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
extern bool drm_helper_initial_config(struct drm_device *dev, bool can_grow);
extern bool drm_helper_initial_config(struct drm_device *dev);
extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,


@@ -27,11 +27,8 @@
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
#if !defined(__KERNEL__) && !defined(_KERNEL)
#include <stdint.h>
#else
#include <linux/kernel.h>
#endif
#include <linux/types.h>
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
@@ -81,41 +78,41 @@
#define DRM_MODE_DITHERING_ON 1
struct drm_mode_modeinfo {
uint32_t clock;
uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
uint32_t vrefresh; /* vertical refresh * 1000 */
__u32 vrefresh; /* vertical refresh * 1000 */
uint32_t flags;
uint32_t type;
__u32 flags;
__u32 type;
char name[DRM_DISPLAY_MODE_LEN];
};
struct drm_mode_card_res {
uint64_t fb_id_ptr;
uint64_t crtc_id_ptr;
uint64_t connector_id_ptr;
uint64_t encoder_id_ptr;
uint32_t count_fbs;
uint32_t count_crtcs;
uint32_t count_connectors;
uint32_t count_encoders;
uint32_t min_width, max_width;
uint32_t min_height, max_height;
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
struct drm_mode_crtc {
uint64_t set_connectors_ptr;
uint32_t count_connectors;
__u64 set_connectors_ptr;
__u32 count_connectors;
uint32_t crtc_id; /**< Id */
uint32_t fb_id; /**< Id of framebuffer */
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
uint32_t x, y; /**< Position on the framebuffer */
__u32 x, y; /**< Position on the framebuffer */
uint32_t gamma_size;
uint32_t mode_valid;
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
@@ -126,13 +123,13 @@ struct drm_mode_crtc {
#define DRM_MODE_ENCODER_TVDAC 4
struct drm_mode_get_encoder {
uint32_t encoder_id;
uint32_t encoder_type;
__u32 encoder_id;
__u32 encoder_type;
uint32_t crtc_id; /**< Id of crtc */
__u32 crtc_id; /**< Id of crtc */
uint32_t possible_crtcs;
uint32_t possible_clones;
__u32 possible_crtcs;
__u32 possible_clones;
};
/* This is for connectors with multiple signal types. */
@@ -161,23 +158,23 @@ struct drm_mode_get_encoder {
struct drm_mode_get_connector {
uint64_t encoders_ptr;
uint64_t modes_ptr;
uint64_t props_ptr;
uint64_t prop_values_ptr;
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
uint32_t count_modes;
uint32_t count_props;
uint32_t count_encoders;
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
uint32_t encoder_id; /**< Current Encoder */
uint32_t connector_id; /**< Id */
uint32_t connector_type;
uint32_t connector_type_id;
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
__u32 connector_type_id;
uint32_t connection;
uint32_t mm_width, mm_height; /**< HxW in millimeters */
uint32_t subpixel;
__u32 connection;
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 subpixel;
};
#define DRM_MODE_PROP_PENDING (1<<0)
@@ -187,46 +184,46 @@ struct drm_mode_get_connector {
#define DRM_MODE_PROP_BLOB (1<<4)
struct drm_mode_property_enum {
uint64_t value;
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
struct drm_mode_get_property {
uint64_t values_ptr; /* values and blob lengths */
uint64_t enum_blob_ptr; /* enum and blob id ptrs */
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
uint32_t prop_id;
uint32_t flags;
__u32 prop_id;
__u32 flags;
char name[DRM_PROP_NAME_LEN];
uint32_t count_values;
uint32_t count_enum_blobs;
__u32 count_values;
__u32 count_enum_blobs;
};
struct drm_mode_connector_set_property {
uint64_t value;
uint32_t prop_id;
uint32_t connector_id;
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
struct drm_mode_get_blob {
uint32_t blob_id;
uint32_t length;
uint64_t data;
__u32 blob_id;
__u32 length;
__u64 data;
};
struct drm_mode_fb_cmd {
uint32_t fb_id;
uint32_t width, height;
uint32_t pitch;
uint32_t bpp;
uint32_t depth;
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
uint32_t handle;
__u32 handle;
};
struct drm_mode_mode_cmd {
uint32_t connector_id;
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
@@ -248,24 +245,24 @@ struct drm_mode_mode_cmd {
* y
*/
struct drm_mode_cursor {
uint32_t flags;
uint32_t crtc_id;
int32_t x;
int32_t y;
uint32_t width;
uint32_t height;
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
uint32_t handle;
__u32 handle;
};
struct drm_mode_crtc_lut {
uint32_t crtc_id;
uint32_t gamma_size;
__u32 crtc_id;
__u32 gamma_size;
/* pointers to arrays */
uint64_t red;
uint64_t green;
uint64_t blue;
__u64 red;
__u64 green;
__u64 blue;
};
#endif


@@ -6,6 +6,19 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#ifndef readq
static inline u64 readq(void __iomem *reg)
{
return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}
static inline void writeq(u64 val, void __iomem *reg)
{
writel(val & 0xffffffff, reg);
writel(val >> 32, reg + 0x4UL);
}
#endif
/** Current process ID */
#define DRM_CURRENTPID task_pid_nr(current)
#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
@@ -23,6 +36,12 @@
/** Write a dword into a MMIO region */
#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
/** Read memory barrier */
/** Read a qword from a MMIO region - be careful using these unless you really understand them */
#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset))
/** Write a qword into a MMIO region */
#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset))
#define DRM_READMEMORYBARRIER() rmb()
/** Write memory barrier */
#define DRM_WRITEMEMORYBARRIER() wmb()
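For illustration only, a minimal sketch (not part of this commit) of how a driver could use the new DRM_READ64/DRM_WRITE64 helpers; the register offset REG_FENCE64 is made up, and on architectures that fall back to the two-readl() readq() above the access is not atomic, so callers that need atomicity must add their own locking.

#include "drmP.h"		/* in-tree DRM private header; pulls in drm_os_linux.h */

#define REG_FENCE64	0x2100	/* hypothetical 64-bit register offset */

static u64 example_read_fence(struct drm_local_map *mmio)
{
	/* On arches without readq() this is composed from two readl()s. */
	return DRM_READ64(mmio, REG_FENCE64);
}

static void example_write_fence(struct drm_local_map *mmio, u64 val)
{
	DRM_WRITE64(mmio, REG_FENCE64, val);
	DRM_WRITEMEMORYBARRIER();	/* order against later MMIO accesses */
}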


@@ -239,10 +239,123 @@
{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
{0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
{0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
{0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
@@ -418,4 +531,6 @@
{0x8086, 0x2e02, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0, 0, 0}


@@ -30,7 +30,7 @@
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
#include <linux/types.h>
#include "drm.h"
/* Each region is a minimum of 16k, and there are at most 255 of them.
@@ -116,15 +116,15 @@ typedef struct _drm_i915_sarea {
/* fill out some space for old userspace triple buffer */
drm_handle_t unused_handle;
uint32_t unused1, unused2, unused3;
__u32 unused1, unused2, unused3;
/* buffer object handles for static buffers. May change
* over the lifetime of the client.
*/
uint32_t front_bo_handle;
uint32_t back_bo_handle;
uint32_t unused_bo_handle;
uint32_t depth_bo_handle;
__u32 front_bo_handle;
__u32 back_bo_handle;
__u32 unused_bo_handle;
__u32 depth_bo_handle;
} drm_i915_sarea_t;
@@ -327,7 +327,7 @@ typedef struct drm_i915_vblank_swap {
} drm_i915_vblank_swap_t;
typedef struct drm_i915_hws_addr {
uint64_t addr;
__u64 addr;
} drm_i915_hws_addr_t;
struct drm_i915_gem_init {
@@ -335,12 +335,12 @@ struct drm_i915_gem_init {
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
uint64_t gtt_start;
__u64 gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
uint64_t gtt_end;
__u64 gtt_end;
};
struct drm_i915_gem_create {
@@ -349,94 +349,94 @@ struct drm_i915_gem_create {
*
* The (page-aligned) allocated size for the object will be returned.
*/
uint64_t size;
__u64 size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
};
struct drm_i915_gem_pread {
/** Handle for the object being read. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
/** Offset into the object to read from */
uint64_t offset;
__u64 offset;
/** Length of data to read */
uint64_t size;
__u64 size;
/**
* Pointer to write the data into.
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t data_ptr;
__u64 data_ptr;
};
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
/** Offset into the object to write to */
uint64_t offset;
__u64 offset;
/** Length of data to write */
uint64_t size;
__u64 size;
/**
* Pointer to read the data from.
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t data_ptr;
__u64 data_ptr;
};
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
/** Offset in the object to map. */
uint64_t offset;
__u64 offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
uint64_t size;
__u64 size;
/**
* Returned pointer the data was mapped at.
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t addr_ptr;
__u64 addr_ptr;
};
struct drm_i915_gem_mmap_gtt {
/** Handle for the object being mapped. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
uint64_t offset;
__u64 offset;
};
struct drm_i915_gem_set_domain {
/** Handle for the object */
uint32_t handle;
__u32 handle;
/** New read domains */
uint32_t read_domains;
__u32 read_domains;
/** New write domain */
uint32_t write_domain;
__u32 write_domain;
};
struct drm_i915_gem_sw_finish {
/** Handle for the object */
uint32_t handle;
__u32 handle;
};
struct drm_i915_gem_relocation_entry {
@@ -448,16 +448,16 @@ struct drm_i915_gem_relocation_entry {
* a relocation list for state buffers and not re-write it per
* exec using the buffer.
*/
uint32_t target_handle;
__u32 target_handle;
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
uint32_t delta;
__u32 delta;
/** Offset in the buffer the relocation entry will be written into */
uint64_t offset;
__u64 offset;
/**
* Offset value of the target buffer that the relocation entry was last
@@ -467,12 +467,12 @@ struct drm_i915_gem_relocation_entry {
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
uint64_t presumed_offset;
__u64 presumed_offset;
/**
* Target memory domains read by this operation.
*/
uint32_t read_domains;
__u32 read_domains;
/**
* Target memory domains written by this operation.
@@ -481,7 +481,7 @@ struct drm_i915_gem_relocation_entry {
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
uint32_t write_domain;
__u32 write_domain;
};
/** @{
@@ -512,24 +512,24 @@ struct drm_i915_gem_exec_object {
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
uint32_t handle;
__u32 handle;
/** Number of relocations to be performed on this buffer */
uint32_t relocation_count;
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
uint64_t relocs_ptr;
__u64 relocs_ptr;
/** Required alignment in graphics aperture */
uint64_t alignment;
__u64 alignment;
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
uint64_t offset;
__u64 offset;
};
struct drm_i915_gem_execbuffer {
@@ -543,44 +543,44 @@ struct drm_i915_gem_execbuffer {
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
uint64_t buffers_ptr;
uint32_t buffer_count;
__u64 buffers_ptr;
__u32 buffer_count;
/** Offset in the batchbuffer to start execution from. */
uint32_t batch_start_offset;
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
uint32_t batch_len;
uint32_t DR1;
uint32_t DR4;
uint32_t num_cliprects;
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
uint64_t cliprects_ptr;
__u64 cliprects_ptr;
};
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
/** alignment required within the aperture */
uint64_t alignment;
__u64 alignment;
/** Returned GTT offset of the buffer. */
uint64_t offset;
__u64 offset;
};
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
uint32_t handle;
uint32_t pad;
__u32 handle;
__u32 pad;
};
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
uint32_t handle;
__u32 handle;
/** Return busy status (1 if busy, 0 if idle) */
uint32_t busy;
__u32 busy;
};
#define I915_TILING_NONE 0
@@ -597,7 +597,7 @@ struct drm_i915_gem_busy {
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
uint32_t handle;
__u32 handle;
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
@@ -611,47 +611,47 @@ struct drm_i915_gem_set_tiling {
*
* Buffer contents become undefined when changing tiling_mode.
*/
uint32_t tiling_mode;
__u32 tiling_mode;
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
uint32_t stride;
__u32 stride;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
uint32_t swizzle_mode;
__u32 swizzle_mode;
};
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
uint32_t handle;
__u32 handle;
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
uint32_t tiling_mode;
__u32 tiling_mode;
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
uint32_t swizzle_mode;
__u32 swizzle_mode;
};
struct drm_i915_gem_get_aperture {
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
uint64_t aper_size;
__u64 aper_size;
/**
* Available space in the aperture used by i915_gem_execbuffer, in
* bytes
*/
uint64_t aper_available_size;
__u64 aper_available_size;
};
#endif /* _I915_DRM_H_ */


@@ -35,6 +35,8 @@
#ifndef __MGA_DRM_H__
#define __MGA_DRM_H__
#include <linux/types.h>
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (mga_sarea.h)
*/
@@ -255,8 +257,8 @@ typedef struct _drm_mga_sarea {
#define DRM_IOCTL_MGA_ILOAD DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, __u32)
#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, __u32)
#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
typedef struct _drm_mga_warp_index {
@@ -310,7 +312,7 @@ typedef struct drm_mga_dma_bootstrap {
*/
/*@{ */
unsigned long texture_handle; /**< Handle used to map AGP textures. */
uint32_t texture_size; /**< Size of the AGP texture region. */
__u32 texture_size; /**< Size of the AGP texture region. */
/*@} */
/**
@@ -319,7 +321,7 @@ typedef struct drm_mga_dma_bootstrap {
* On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
* filled in with the actual AGP mode. If AGP was not available
*/
uint32_t primary_size;
__u32 primary_size;
/**
* Requested number of secondary DMA buffers.
@@ -329,7 +331,7 @@ typedef struct drm_mga_dma_bootstrap {
* allocated. Particularly when PCI DMA is used, this may be
* (substantially) less than the number requested.
*/
uint32_t secondary_bin_count;
__u32 secondary_bin_count;
/**
* Requested size of each secondary DMA buffer.
@@ -338,7 +340,7 @@ typedef struct drm_mga_dma_bootstrap {
* dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
* to reduce dma_mga_dma_bootstrap::secondary_bin_size.
*/
uint32_t secondary_bin_size;
__u32 secondary_bin_size;
/**
* Bit-wise mask of AGPSTAT2_* values. Currently only \c AGPSTAT2_1X,
@@ -350,12 +352,12 @@ typedef struct drm_mga_dma_bootstrap {
* filled in with the actual AGP mode. If AGP was not available
* (i.e., PCI DMA was used), this value will be zero.
*/
uint32_t agp_mode;
__u32 agp_mode;
/**
* Desired AGP GART size, measured in megabytes.
*/
uint8_t agp_size;
__u8 agp_size;
} drm_mga_dma_bootstrap_t;
typedef struct drm_mga_clear {


@@ -33,6 +33,8 @@
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
#include <linux/types.h>
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
*/
@@ -304,6 +306,8 @@ typedef union {
#define RADEON_SCRATCH_REG_OFFSET 32
#define R600_SCRATCH_REG_OFFSET 256
#define RADEON_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/GART). Each region within a heap is a
@@ -526,7 +530,8 @@ typedef struct drm_radeon_init {
RADEON_INIT_CP = 0x01,
RADEON_CLEANUP_CP = 0x02,
RADEON_INIT_R200_CP = 0x03,
RADEON_INIT_R300_CP = 0x04
RADEON_INIT_R300_CP = 0x04,
RADEON_INIT_R600_CP = 0x05
} func;
unsigned long sarea_priv_offset;
int is_pci;
@@ -722,7 +727,7 @@ typedef struct drm_radeon_irq_wait {
typedef struct drm_radeon_setparam {
unsigned int param;
int64_t value;
__s64 value;
} drm_radeon_setparam_t;
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */


@@ -24,6 +24,8 @@
#ifndef _VIA_DRM_H_
#define _VIA_DRM_H_
#include <linux/types.h>
/* WARNING: These defines must be the same as what the Xserver uses.
* if you change them, you must change the defines in the Xserver.
*/
@@ -114,19 +116,19 @@
#define VIA_MEM_UNKNOWN 4
typedef struct {
uint32_t offset;
uint32_t size;
__u32 offset;
__u32 size;
} drm_via_agp_t;
typedef struct {
uint32_t offset;
uint32_t size;
__u32 offset;
__u32 size;
} drm_via_fb_t;
typedef struct {
uint32_t context;
uint32_t type;
uint32_t size;
__u32 context;
__u32 type;
__u32 size;
unsigned long index;
unsigned long offset;
} drm_via_mem_t;
@@ -148,9 +150,9 @@ typedef struct _drm_via_futex {
VIA_FUTEX_WAIT = 0x00,
VIA_FUTEX_WAKE = 0X01
} func;
uint32_t ms;
uint32_t lock;
uint32_t val;
__u32 ms;
__u32 lock;
__u32 val;
} drm_via_futex_t;
typedef struct _drm_via_dma_init {
@@ -211,7 +213,7 @@ typedef struct _drm_via_cmdbuf_size {
VIA_CMDBUF_LAG = 0x02
} func;
int wait;
uint32_t size;
__u32 size;
} drm_via_cmdbuf_size_t;
typedef enum {
@@ -236,8 +238,8 @@ enum drm_via_irqs {
struct drm_via_wait_irq_request {
unsigned irq;
via_irq_seq_type_t type;
uint32_t sequence;
uint32_t signal;
__u32 sequence;
__u32 signal;
};
typedef union drm_via_irqwait {
@@ -246,7 +248,7 @@ typedef union drm_via_irqwait {
} drm_via_irqwait_t;
typedef struct drm_via_blitsync {
uint32_t sync_handle;
__u32 sync_handle;
unsigned engine;
} drm_via_blitsync_t;
@@ -257,16 +259,16 @@ typedef struct drm_via_blitsync {
*/
typedef struct drm_via_dmablit {
uint32_t num_lines;
uint32_t line_length;
__u32 num_lines;
__u32 line_length;
uint32_t fb_addr;
uint32_t fb_stride;
__u32 fb_addr;
__u32 fb_stride;
unsigned char *mem_addr;
uint32_t mem_stride;
__u32 mem_stride;
uint32_t flags;
__u32 flags;
int to_fb;
drm_via_blitsync_t sync;


@@ -67,6 +67,7 @@ header-y += falloc.h
header-y += fd.h
header-y += fdreg.h
header-y += fib_rules.h
header-y += fiemap.h
header-y += firewire-cdev.h
header-y += firewire-constants.h
header-y += fuse.h
@@ -115,6 +116,7 @@ header-y += mqueue.h
header-y += mtio.h
header-y += ncp_no.h
header-y += neighbour.h
header-y += net_dropmon.h
header-y += netfilter_arp.h
header-y += netrom.h
header-y += nfs2.h
@@ -157,8 +159,6 @@ header-y += ultrasound.h
header-y += un.h
header-y += utime.h
header-y += veth.h
header-y += video_decoder.h
header-y += video_encoder.h
header-y += videotext.h
header-y += x25.h


@@ -97,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
/* the following four functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
void acpi_numa_arch_fixup(void);
@@ -257,6 +258,40 @@ void __init acpi_no_s4_hw_signature(void);
void __init acpi_old_suspend_ordering(void);
void __init acpi_s4_no_nvs(void);
#endif /* CONFIG_PM_SLEEP */
#define OSC_QUERY_TYPE 0
#define OSC_SUPPORT_TYPE 1
#define OSC_CONTROL_TYPE 2
#define OSC_SUPPORT_MASKS 0x1f
/* _OSC DW0 Definition */
#define OSC_QUERY_ENABLE 1
#define OSC_REQUEST_ERROR 2
#define OSC_INVALID_UUID_ERROR 4
#define OSC_INVALID_REVISION_ERROR 8
#define OSC_CAPABILITIES_MASK_ERROR 16
/* _OSC DW1 Definition (OS Support Fields) */
#define OSC_EXT_PCI_CONFIG_SUPPORT 1
#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
#define OSC_MSI_SUPPORT 16
/* _OSC DW1 Definition (OS Control Fields) */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
#define OSC_SHPC_NATIVE_HP_CONTROL 2
#define OSC_PCI_EXPRESS_PME_CONTROL 4
#define OSC_PCI_EXPRESS_AER_CONTROL 8
#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
OSC_SHPC_NATIVE_HP_CONTROL | \
OSC_PCI_EXPRESS_PME_CONTROL | \
OSC_PCI_EXPRESS_AER_CONTROL | \
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
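As a hedged sketch only, this is roughly how a PCI host-bridge driver might ask the firmware for native hotplug and PME control through the new _OSC helper; the handle is assumed to come from the bridge's ACPI companion, and real callers would normally run a support query first.

#include <linux/acpi.h>
#include <linux/errno.h>

static int example_request_osc_control(acpi_handle handle)
{
	u32 flags = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
		    OSC_PCI_EXPRESS_PME_CONTROL;
	acpi_status status;

	/* Ask the platform to hand the listed features over to the OS. */
	status = acpi_pci_osc_control_set(handle, flags);
	if (ACPI_FAILURE(status))
		return -ENODEV;		/* firmware kept control */

	return 0;
}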
#else /* CONFIG_ACPI */
static inline int early_acpi_boot_init(void)


@@ -77,20 +77,20 @@ typedef struct _agp_setup {
* The "prot" down below needs still a "sleep" flag somehow ...
*/
typedef struct _agp_segment {
off_t pg_start; /* starting page to populate */
size_t pg_count; /* number of pages */
int prot; /* prot flags for mmap */
__kernel_off_t pg_start; /* starting page to populate */
__kernel_size_t pg_count; /* number of pages */
int prot; /* prot flags for mmap */
} agp_segment;
typedef struct _agp_region {
pid_t pid; /* pid of process */
size_t seg_count; /* number of segments */
__kernel_pid_t pid; /* pid of process */
__kernel_size_t seg_count; /* number of segments */
struct _agp_segment *seg_list;
} agp_region;
typedef struct _agp_allocate {
int key; /* tag of allocation */
size_t pg_count; /* number of pages */
__kernel_size_t pg_count;/* number of pages */
__u32 type; /* 0 == normal, other devspec */
__u32 physical; /* device specific (some devices
* need a phys address of the
@@ -100,7 +100,7 @@ typedef struct _agp_allocate {
typedef struct _agp_bind {
int key; /* tag of allocation */
off_t pg_start; /* starting page to populate */
__kernel_off_t pg_start;/* starting page to populate */
} agp_bind;
typedef struct _agp_unbind {


@@ -235,8 +235,6 @@ struct Outgoing {
struct arcnet_local {
struct net_device_stats stats;
uint8_t config, /* current value of CONFIG register */
timeout, /* Extended timeout for COM20020 */
backplane, /* Backplane flag for COM20020 */
@@ -335,7 +333,12 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
void arcnet_unregister_proto(struct ArcProto *proto);
irqreturn_t arcnet_interrupt(int irq, void *dev_id);
struct net_device *alloc_arcdev(char *name);
struct net_device *alloc_arcdev(const char *name);
int arcnet_open(struct net_device *dev);
int arcnet_close(struct net_device *dev);
int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev);
void arcnet_timeout(struct net_device *dev);
#endif /* __KERNEL__ */
#endif /* _LINUX_ARCDEVICE_H */


@@ -21,6 +21,15 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
/* on architectures without dma-mapping capabilities we need to ensure
* that the asynchronous path compiles away
*/
#ifdef CONFIG_HAS_DMA
#define __async_inline
#else
#define __async_inline __always_inline
#endif
/**
* dma_chan_ref - object used to manage dma channels received from the
* dmaengine core.


@@ -108,6 +108,8 @@ enum {
ATA_PIO5 = ATA_PIO4 | (1 << 5),
ATA_PIO6 = ATA_PIO5 | (1 << 6),
ATA_PIO4_ONLY = (1 << 4),
ATA_SWDMA0 = (1 << 0),
ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1),
ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2),
@@ -117,6 +119,8 @@ enum {
ATA_MWDMA0 = (1 << 0),
ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1),
ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2),
ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3),
ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4),
ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2),
ATA_MWDMA2_ONLY = (1 << 2),
@@ -131,6 +135,8 @@ enum {
ATA_UDMA7 = ATA_UDMA6 | (1 << 7),
/* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */
ATA_UDMA24_ONLY = (1 << 2) | (1 << 4),
ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */
/* DMA-related */
@@ -244,8 +250,6 @@ enum {
ATA_CMD_MEDIA_UNLOCK = 0xDF,
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
/* EXABYTE specific */
ATA_EXABYTE_ENABLE_NEST = 0xF0,
/* READ_LOG_EXT pages */
ATA_LOG_SATA_NCQ = 0x10,


@@ -0,0 +1,28 @@
/*
* Copyright (c) 2008 Atheros Communications Inc.
* Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
* Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _LINUX_ATH9K_PLATFORM_H
#define _LINUX_ATH9K_PLATFORM_H
#define ATH9K_PLAT_EEP_MAX_WORDS 2048
struct ath9k_platform_data {
u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
};
#endif /* _LINUX_ATH9K_PLATFORM_H */
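A short sketch, under stated assumptions, of how a board file could feed EEPROM contents to the wireless driver through this new structure; the "ath9k" platform-device name and the idea of filling the array from flash are assumptions, not something this header defines.

#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>

static struct ath9k_platform_data example_ath9k_pdata = {
	/* eeprom_data[] would be filled from flash/boot firmware here */
};

static struct platform_device example_ath9k_dev = {
	.name	= "ath9k",		/* assumed device name */
	.id	= -1,
	.dev	= {
		.platform_data	= &example_ath9k_pdata,
	},
};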


@@ -11,6 +11,7 @@
#include <linux/atmioc.h>
#include <linux/atm.h>
#include <linux/if_ether.h>
#include <linux/types.h>
/* ATM lec daemon control socket */
#define ATMLEC_CTRL _IO('a', ATMIOC_LANE)
@@ -78,8 +79,8 @@ struct atmlec_msg {
} normal;
struct atmlec_config_msg config;
struct {
uint16_t lec_id; /* requestor lec_id */
uint32_t tran_id; /* transaction id */
__u16 lec_id; /* requestor lec_id */
__u32 tran_id; /* transaction id */
unsigned char mac_addr[ETH_ALEN]; /* dst mac addr */
unsigned char atm_addr[ATM_ESA_LEN]; /* requestor ATM addr */
} proxy; /*


@@ -4,6 +4,7 @@
#include <linux/atmapi.h>
#include <linux/atmioc.h>
#include <linux/atm.h>
#include <linux/types.h>
#define ATMMPC_CTRL _IO('a', ATMIOC_MPOA)
#define ATMMPC_DATA _IO('a', ATMIOC_MPOA+1)
@@ -18,39 +19,39 @@ struct atmmpc_ioc {
};
typedef struct in_ctrl_info {
uint8_t Last_NHRP_CIE_code;
uint8_t Last_Q2931_cause_value;
uint8_t eg_MPC_ATM_addr[ATM_ESA_LEN];
__u8 Last_NHRP_CIE_code;
__u8 Last_Q2931_cause_value;
__u8 eg_MPC_ATM_addr[ATM_ESA_LEN];
__be32 tag;
__be32 in_dst_ip; /* IP address this ingress MPC sends packets to */
uint16_t holding_time;
uint32_t request_id;
__u16 holding_time;
__u32 request_id;
} in_ctrl_info;
typedef struct eg_ctrl_info {
uint8_t DLL_header[256];
uint8_t DH_length;
__u8 DLL_header[256];
__u8 DH_length;
__be32 cache_id;
__be32 tag;
__be32 mps_ip;
__be32 eg_dst_ip; /* IP address to which ingress MPC sends packets */
uint8_t in_MPC_data_ATM_addr[ATM_ESA_LEN];
uint16_t holding_time;
__u8 in_MPC_data_ATM_addr[ATM_ESA_LEN];
__u16 holding_time;
} eg_ctrl_info;
struct mpc_parameters {
uint16_t mpc_p1; /* Shortcut-Setup Frame Count */
uint16_t mpc_p2; /* Shortcut-Setup Frame Time */
uint8_t mpc_p3[8]; /* Flow-detection Protocols */
uint16_t mpc_p4; /* MPC Initial Retry Time */
uint16_t mpc_p5; /* MPC Retry Time Maximum */
uint16_t mpc_p6; /* Hold Down Time */
__u16 mpc_p1; /* Shortcut-Setup Frame Count */
__u16 mpc_p2; /* Shortcut-Setup Frame Time */
__u8 mpc_p3[8]; /* Flow-detection Protocols */
__u16 mpc_p4; /* MPC Initial Retry Time */
__u16 mpc_p5; /* MPC Retry Time Maximum */
__u16 mpc_p6; /* Hold Down Time */
} ;
struct k_message {
uint16_t type;
__u16 type;
__be32 ip_mask;
uint8_t MPS_ctrl[ATM_ESA_LEN];
__u8 MPS_ctrl[ATM_ESA_LEN];
union {
in_ctrl_info in_info;
eg_ctrl_info eg_info;
@@ -61,11 +62,11 @@ struct k_message {
struct llc_snap_hdr {
/* RFC 1483 LLC/SNAP encapsulation for routed IP PDUs */
uint8_t dsap; /* Destination Service Access Point (0xAA) */
uint8_t ssap; /* Source Service Access Point (0xAA) */
uint8_t ui; /* Unnumbered Information (0x03) */
uint8_t org[3]; /* Organizational identification (0x000000) */
uint8_t type[2]; /* Ether type (for IP) (0x0800) */
__u8 dsap; /* Destination Service Access Point (0xAA) */
__u8 ssap; /* Source Service Access Point (0xAA) */
__u8 ui; /* Unnumbered Information (0x03) */
__u8 org[3]; /* Organizational identification (0x000000) */
__u8 type[2]; /* Ether type (for IP) (0x0800) */
};
/* TLVs this MPC recognizes */


@@ -36,7 +36,8 @@
* 1500 - 1599 kernel LSPP events
* 1600 - 1699 kernel crypto events
* 1700 - 1799 kernel anomaly records
* 1800 - 1999 future kernel use (maybe integrity labels and related events)
* 1800 - 1899 kernel integrity events
* 1900 - 1999 future kernel use
* 2000 is for otherwise unclassified kernel audit messages (legacy)
* 2001 - 2099 unused (kernel)
* 2100 - 2199 user space anomaly records
@@ -125,6 +126,12 @@
#define AUDIT_LAST_KERN_ANOM_MSG 1799
#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */
#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */
#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */
#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */
#define AUDIT_INTEGRITY_HASH 1803 /* Integrity HASH type */
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */


@@ -10,8 +10,13 @@
#ifndef _LINUX_AUTO_DEV_IOCTL_H
#define _LINUX_AUTO_DEV_IOCTL_H
#include <linux/auto_fs.h>
#ifdef __KERNEL__
#include <linux/string.h>
#include <linux/types.h>
#else
#include <string.h>
#endif /* __KERNEL__ */
#define AUTOFS_DEVICE_NAME "autofs"


@@ -17,10 +17,12 @@
#ifdef __KERNEL__
#include <linux/fs.h>
#include <linux/limits.h>
#include <asm/types.h>
#endif /* __KERNEL__ */
#include <linux/types.h>
#include <linux/ioctl.h>
#else
#include <asm/types.h>
#include <sys/ioctl.h>
#endif /* __KERNEL__ */
/* This file describes autofs v3 */
#define AUTOFS_PROTO_VERSION 3


@@ -35,8 +35,7 @@ struct linux_binprm{
#endif
struct mm_struct *mm;
unsigned long p; /* current top of mem */
unsigned int sh_bang:1,
misc_bang:1,
unsigned int
cred_prepared:1,/* true if creds already prepared (multiple
* preps happen for interpreters) */
cap_effective:1;/* true if has elevated effective capabilities,


@@ -426,9 +426,6 @@ struct bio_set {
unsigned int front_pad;
mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
mempool_t *bio_integrity_pool;
#endif
mempool_t *bvec_pool;
};
@@ -519,9 +516,8 @@ static inline int bio_has_data(struct bio *bio)
#define bio_integrity(bio) (bio->bi_integrity != NULL)
extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *, struct bio_set *);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -531,27 +527,21 @@ extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init_slab(void);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
#else /* CONFIG_BLK_DEV_INTEGRITY */
#define bio_integrity(a) (0)
#define bioset_integrity_create(a, b) (0)
#define bio_integrity_prep(a) (0)
#define bio_integrity_enabled(a) (0)
#define bio_integrity_clone(a, b, c,d ) (0)
#define bioset_integrity_free(a) do { } while (0)
#define bio_integrity_free(a, b) do { } while (0)
#define bio_integrity_clone(a, b, c) (0)
#define bio_integrity_free(a) do { } while (0)
#define bio_integrity_endio(a, b) do { } while (0)
#define bio_integrity_advance(a, b) do { } while (0)
#define bio_integrity_trim(a, b, c) do { } while (0)
#define bio_integrity_split(a, b, c) do { } while (0)
#define bio_integrity_set_tag(a, b, c) do { } while (0)
#define bio_integrity_get_tag(a, b, c) do { } while (0)
#define bio_integrity_init_slab(a) do { } while (0)
#endif /* CONFIG_BLK_DEV_INTEGRITY */


@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
#ifdef __KERNEL__
#if defined(CONFIG_BLK_DEV_IO_TRACE)
#include <linux/sysfs.h>
struct blk_trace {
int trace_state;
struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
#define blk_trace_shutdown(q) do { } while (0)


@@ -146,10 +146,10 @@ extern void *alloc_large_system_hash(const char *tablename,
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
/* Only NUMA needs hash distribution.
* IA64 and x86_64 have sufficient vmalloc space.
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
*/
#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64))
#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
#define HASHDIST_DEFAULT 1
#else
#define HASHDIST_DEFAULT 0


@@ -1,12 +1,22 @@
#ifndef BSG_H
#define BSG_H
#include <linux/types.h>
#define BSG_PROTOCOL_SCSI 0
#define BSG_SUB_PROTOCOL_SCSI_CMD 0
#define BSG_SUB_PROTOCOL_SCSI_TMF 1
#define BSG_SUB_PROTOCOL_SCSI_TRANSPORT 2
/*
* For flags member below
* sg.h sg_io_hdr also has bits defined for its flags member. However
* none of these bits are implemented/used by bsg. The bits below are
* allocated to not conflict with sg.h ones anyway.
*/
#define BSG_FLAG_Q_AT_TAIL 0x10 /* default, == 0 at this bit, is Q_AT_HEAD */
struct sg_io_v4 {
__s32 guard; /* [i] 'Q' to differentiate from v3 */
__u32 protocol; /* [i] 0 -> SCSI , .... */


@@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
struct super_block *freeze_bdev(struct block_device *);
int thaw_bdev(struct block_device *, struct super_block *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
@@ -223,7 +216,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
void block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
@@ -339,22 +332,10 @@ extern int __set_page_dirty_buffers(struct page *page);
static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bdev(struct block_device *bdev) {}
static inline struct super_block *freeze_bdev(struct block_device *sb)
{
return NULL;
}
static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
return 0;
}
#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */


@@ -15,6 +15,7 @@
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#ifdef CONFIG_CGROUPS
@@ -22,6 +23,7 @@ struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;
extern int cgroup_init_early(void);
extern int cgroup_init(void);
@@ -47,18 +49,24 @@ enum cgroup_subsys_id {
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
/* The cgroup that this subsystem is attached to. Useful
/*
* The cgroup that this subsystem is attached to. Useful
* for subsystems that want to know about the cgroup
* hierarchy structure */
* hierarchy structure
*/
struct cgroup *cgroup;
/* State maintained by the cgroup system to allow subsystems
/*
* State maintained by the cgroup system to allow subsystems
* to be "busy". Should be accessed via css_get(),
* css_tryget() and css_put(). */
* css_tryget() and css_put().
*/
atomic_t refcnt;
unsigned long flags;
/* ID for this css, if possible */
struct css_id *id;
};
/* bits in struct cgroup_subsys_state flags field */
@@ -120,19 +128,26 @@ static inline void css_put(struct cgroup_subsys_state *css)
enum {
/* Control Group is dead */
CGRP_REMOVED,
/* Control Group has previously had a child cgroup or a task,
* but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
/*
* Control Group has previously had a child cgroup or a task,
* but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
*/
CGRP_RELEASABLE,
/* Control Group requires release notifications to userspace */
CGRP_NOTIFY_ON_RELEASE,
/*
* A thread in rmdir() is waiting for this cgroup.
*/
CGRP_WAIT_ON_RMDIR,
};
struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
/* count users of this cgroup. >0 means busy, but doesn't
* necessarily indicate the number of tasks in the
* cgroup */
/*
* count users of this cgroup. >0 means busy, but doesn't
* necessarily indicate the number of tasks in the cgroup
*/
atomic_t count;
/*
@@ -142,7 +157,7 @@ struct cgroup {
struct list_head sibling; /* my parent's children */
struct list_head children; /* my children */
struct cgroup *parent; /* my parent */
struct cgroup *parent; /* my parent */
struct dentry *dentry; /* cgroup fs entry, RCU protected */
/* Private pointers for each registered subsystem */
@@ -177,11 +192,12 @@ struct cgroup {
struct rcu_head rcu_head;
};
/* A css_set is a structure holding pointers to a set of
/*
* A css_set is a structure holding pointers to a set of
* cgroup_subsys_state objects. This saves space in the task struct
* object and speeds up fork()/exit(), since a single inc/dec and a
* list_add()/del() can bump the reference count on the entire
* cgroup set for a task.
* list_add()/del() can bump the reference count on the entire cgroup
* set for a task.
*/
struct css_set {
@@ -226,13 +242,8 @@ struct cgroup_map_cb {
void *state;
};
/* struct cftype:
*
* The files in the cgroup filesystem mostly have a very simple read/write
* handling, some common function will take care of it. Nevertheless some cases
* (read tasks) are special and therefore I define this structure for every
* kind of file.
*
/*
* struct cftype: handler definitions for cgroup control files
*
* When reading/writing to a file:
* - the cgroup to use is file->f_dentry->d_parent->d_fsdata
@@ -241,10 +252,17 @@ struct cgroup_map_cb {
#define MAX_CFTYPE_NAME 64
struct cftype {
/* By convention, the name should begin with the name of the
* subsystem, followed by a period */
/*
* By convention, the name should begin with the name of the
* subsystem, followed by a period
*/
char name[MAX_CFTYPE_NAME];
int private;
/*
* If not 0, file mode is set to this value, otherwise it will
* be figured out automatically
*/
mode_t mode;
/*
* If non-zero, defines the maximum length of string that can
@@ -319,15 +337,20 @@ struct cgroup_scanner {
void (*process_task)(struct task_struct *p,
struct cgroup_scanner *scan);
struct ptr_heap *heap;
void *data;
};
/* Add a new file to the given cgroup directory. Should only be
* called by subsystems from within a populate() method */
/*
* Add a new file to the given cgroup directory. Should only be
* called by subsystems from within a populate() method
*/
int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
const struct cftype *cft);
/* Add a set of new files to the given cgroup directory. Should
* only be called by subsystems from within a populate() method */
/*
* Add a set of new files to the given cgroup directory. Should
* only be called by subsystems from within a populate() method
*/
int cgroup_add_files(struct cgroup *cgrp,
struct cgroup_subsys *subsys,
const struct cftype cft[],
@@ -339,15 +362,18 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int cgroup_task_count(const struct cgroup *cgrp);
/* Return true if the cgroup is a descendant of the current cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp);
/* Return true if cgrp is a descendant of the task's cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
/* Control Group subsystem type. See Documentation/cgroups.txt for details */
/*
* Control Group subsystem type.
* See Documentation/cgroups/cgroups.txt for details
*/
struct cgroup_subsys {
struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
struct cgroup *cgrp);
void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
int (*can_attach)(struct cgroup_subsys *ss,
struct cgroup *cgrp, struct task_struct *tsk);
@@ -364,6 +390,11 @@ struct cgroup_subsys {
int active;
int disabled;
int early_init;
/*
* True if this subsys uses ID. ID is not available before cgroup_init()
* (not available in early_init time.)
*/
bool use_id;
#define MAX_CGROUP_TYPE_NAMELEN 32
const char *name;
@@ -386,6 +417,9 @@ struct cgroup_subsys {
*/
struct cgroupfs_root *root;
struct list_head sibling;
/* used when use_id == true */
struct idr idr;
spinlock_t id_lock;
};
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -419,7 +453,8 @@ struct cgroup_iter {
struct list_head *task;
};
/* To iterate across the tasks in a cgroup:
/*
* To iterate across the tasks in a cgroup:
*
* 1) call cgroup_iter_start to initialize an iterator
*
@@ -428,9 +463,10 @@ struct cgroup_iter {
*
* 3) call cgroup_iter_end() to destroy the iterator.
*
* Or, call cgroup_scan_tasks() to iterate through every task in a cpuset.
* - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
* callback, but not while calling the process_task() callback.
* Or, call cgroup_scan_tasks() to iterate through every task in a
* cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
* the test_task() callback, but not while calling the process_task()
* callback.
*/
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
@@ -439,6 +475,44 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
/*
* CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
* if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
* CSS ID is assigned at cgroup allocation (create) automatically
* and removed when subsys calls free_css_id() function. This is because
* the lifetime of cgroup_subsys_state is subsys's matter.
*
* Lookup and scanning functions should be called under rcu_read_lock().
* Taking cgroup_mutex()/hierarchy_mutex() is not necessary for following calls.
* But the css returned by this routine can be "not populated yet" or "being
* destroyed". The caller should check css and cgroup's status.
*/
/*
* Typically called at ->destroy(), or somewhere the subsys frees
* cgroup_subsys_state.
*/
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
/* Find a cgroup_subsys_state which has given ID */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
/*
* Get a cgroup whose id is greater than or equal to id under tree of root.
* Returning a cgroup_subsys_state or NULL.
*/
struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
struct cgroup_subsys_state *root, int *foundid);
/* Returns true if root is ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
const struct cgroup_subsys_state *root);
/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
unsigned short css_depth(struct cgroup_subsys_state *css);
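To make the lookup/scanning contract above concrete, a hedged sketch of walking every css under a root with css_get_next(); the subsystem and starting id are placeholders and, as the comment warns, each returned css may still be unpopulated or being destroyed, so the caller must check its status before relying on it.

#include <linux/kernel.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>

static void example_walk_css(struct cgroup_subsys *ss,
			     struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *css;
	int id = 1, found;

	rcu_read_lock();
	while ((css = css_get_next(ss, id, root, &found)) != NULL) {
		/* css may be unpopulated or dying; check before real use */
		pr_debug("css id %u, depth %u\n",
			 (unsigned int)css_id(css),
			 (unsigned int)css_depth(css));
		id = found + 1;		/* resume after the last hit */
	}
	rcu_read_unlock();
}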
#else /* !CONFIG_CGROUPS */
static inline int cgroup_init_early(void) { return 0; }


@@ -125,4 +125,21 @@ int clk_set_parent(struct clk *clk, struct clk *parent);
*/
struct clk *clk_get_parent(struct clk *clk);
/**
* clk_get_sys - get a clock based upon the device name
* @dev_id: device name
* @con_id: connection ID
*
* Returns a struct clk corresponding to the clock producer, or
* a valid IS_ERR() condition containing errno. The implementation
* uses @dev_id and @con_id to determine the clock consumer, and
* thereby the clock producer. In contrast to clk_get() this function
* takes the device name instead of the device itself for identification.
*
* Drivers must assume that the clock source is not enabled.
*
* clk_get_sys should not be called from within interrupt context.
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
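A brief usage sketch of the lookup-by-name path this adds, assuming a platform that registers a clock under the name "uart0"; the identifiers are placeholders and teardown (clk_disable()/clk_put()) is omitted for brevity.

#include <linux/clk.h>
#include <linux/err.h>

static int example_enable_uart_clk(void)
{
	/* Look the clock up by device name rather than by struct device. */
	struct clk *clk = clk_get_sys("uart0", NULL);	/* names are placeholders */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return clk_enable(clk);		/* assume it comes up disabled */
}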
#endif


@@ -21,9 +21,110 @@
typedef u64 cycle_t;
struct clocksource;
/**
* struct cyclecounter - hardware abstraction for a free running counter
* Provides completely state-free accessors to the underlying hardware.
* Depending on which hardware it reads, the cycle counter may wrap
* around quickly. Locking rules (if necessary) have to be defined
* by the implementor and user of specific instances of this API.
*
* @read: returns the current cycle value
* @mask: bitmask for two's complement
* subtraction of non 64 bit counters,
* see CLOCKSOURCE_MASK() helper macro
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
*/
struct cyclecounter {
cycle_t (*read)(const struct cyclecounter *cc);
cycle_t mask;
u32 mult;
u32 shift;
};
/**
* struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
* Contains the state needed by timecounter_read() to detect
* cycle counter wrap around. Initialize with
* timecounter_init(). Also used to convert cycle counts into the
* corresponding nanosecond counts with timecounter_cyc2time(). Users
* of this code are responsible for initializing the underlying
* cycle counter hardware, locking issues and reading the time
* more often than the cycle counter wraps around. The nanosecond
* counter will only wrap around after ~585 years.
*
* @cc: the cycle counter used by this instance
* @cycle_last: most recent cycle counter value seen by
* timecounter_read()
* @nsec: continuously increasing count
*/
struct timecounter {
const struct cyclecounter *cc;
cycle_t cycle_last;
u64 nsec;
};
/**
* cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
* @tc: Pointer to cycle counter.
* @cycles: Cycles
*
* XXX - This could use some mult_lxl_ll() asm optimization. Same code
* as in cyc2ns, but with unsigned result.
*/
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
cycle_t cycles)
{
u64 ret = (u64)cycles;
ret = (ret * cc->mult) >> cc->shift;
return ret;
}
/**
* timecounter_init - initialize a time counter
* @tc: Pointer to time counter which is to be initialized/reset
* @cc: A cycle counter, ready to be used.
* @start_tstamp: Arbitrary initial time stamp.
*
* After this call the current cycle register (roughly) corresponds to
* the initial time stamp. Every call to timecounter_read() increments
* the time stamp counter by the number of elapsed nanoseconds.
*/
extern void timecounter_init(struct timecounter *tc,
const struct cyclecounter *cc,
u64 start_tstamp);
/**
* timecounter_read - return nanoseconds elapsed since timecounter_init()
* plus the initial time stamp
* @tc: Pointer to time counter.
*
* In other words, keeps track of time since the same epoch as
* the function which generated the initial time stamp.
*/
extern u64 timecounter_read(struct timecounter *tc);
/**
* timecounter_cyc2time - convert a cycle counter to same
* time base as values returned by
* timecounter_read()
* @tc: Pointer to time counter.
* @cycle: a value returned by tc->cc->read()
*
 * Cycle counts are converted correctly as long as they
* fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
* with "max cycle count" == cs->mask+1.
*
* This allows conversion of cycle counter values which were generated
* in the past.
*/
extern u64 timecounter_cyc2time(struct timecounter *tc,
cycle_t cycle_tstamp);
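A hedged sketch (not part of the diff) wiring a hypothetical 1 GHz MMIO counter into a timecounter; the register pointer, the mult/shift choice and the start timestamp are assumptions:

static void __iomem *example_counter_reg;	/* illustrative MMIO address */

static cycle_t example_cc_read(const struct cyclecounter *cc)
{
	return (cycle_t)readl(example_counter_reg);
}

static struct cyclecounter example_cc = {
	.read	= example_cc_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.mult	= 1 << 20,	/* 1 GHz counter: ns == cycles, so mult/shift cancel */
	.shift	= 20,
};

static struct timecounter example_tc;

static void example_timecounter_start(void)
{
	timecounter_init(&example_tc, &example_cc, ktime_to_ns(ktime_get_real()));
}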
/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
*
* @name: ptr to clocksource name
* @list: list head for registration

View File

@@ -1,6 +1,8 @@
#ifndef _CM4000_H_
#define _CM4000_H_
#include <linux/types.h>
#define MAX_ATR 33
#define CM4000_MAX_DEV 4
@@ -10,9 +12,9 @@
* not to break compilation of userspace apps. -HW */
typedef struct atreq {
int32_t atr_len;
__s32 atr_len;
unsigned char atr[64];
int32_t power_act;
__s32 power_act;
unsigned char bIFSD;
unsigned char bIFSC;
} atreq_t;
@@ -22,13 +24,13 @@ typedef struct atreq {
* member sizes. This leads to CONFIG_COMPAT breakage, since 32bit userspace
* will lay out the structure members differently than the 64bit kernel.
*
* I've changed "ptsreq.protocol" from "unsigned long" to "u_int32_t".
* I've changed "ptsreq.protocol" from "unsigned long" to "__u32".
* On 32bit this will make no difference. With 64bit kernels, it will make
* 32bit apps work, too.
*/
typedef struct ptsreq {
u_int32_t protocol; /*T=0: 2^0, T=1: 2^1*/
__u32 protocol; /*T=0: 2^0, T=1: 2^1*/
unsigned char flags;
unsigned char pts1;
unsigned char pts2;

View File

@@ -65,20 +65,20 @@ struct proc_event {
} ack;
struct fork_proc_event {
pid_t parent_pid;
pid_t parent_tgid;
pid_t child_pid;
pid_t child_tgid;
__kernel_pid_t parent_pid;
__kernel_pid_t parent_tgid;
__kernel_pid_t child_pid;
__kernel_pid_t child_tgid;
} fork;
struct exec_proc_event {
pid_t process_pid;
pid_t process_tgid;
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
} exec;
struct id_proc_event {
pid_t process_pid;
pid_t process_tgid;
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
union {
__u32 ruid; /* task uid */
__u32 rgid; /* task gid */
@@ -90,8 +90,8 @@ struct proc_event {
} id;
struct exit_proc_event {
pid_t process_pid;
pid_t process_tgid;
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
__u32 exit_code, exit_signal;
} exit;
} event_data;

View File

@@ -29,6 +29,7 @@
int com20020_check(struct net_device *dev);
int com20020_found(struct net_device *dev, int shared);
extern const struct net_device_ops com20020_netdev_ops;
/* The number of low I/O ports used by the card. */
#define ARCNET_TOTAL_SIZE 8

View File

@@ -125,6 +125,13 @@ struct compat_dirent {
char d_name[256];
};
struct compat_ustat {
compat_daddr_t f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
typedef union compat_sigval {
compat_int_t sival_int;
compat_uptr_t sival_ptr;
@@ -178,11 +185,18 @@ long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned nsems, const struct compat_timespec __user *timeout);
asmlinkage long compat_sys_keyctl(u32 option,
u32 arg2, u32 arg3, u32 arg4, u32 arg5);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage ssize_t compat_sys_readv(unsigned long fd,
const struct compat_iovec __user *vec, unsigned long vlen);
asmlinkage ssize_t compat_sys_writev(unsigned long fd,
const struct compat_iovec __user *vec, unsigned long vlen);
asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
const struct compat_iovec __user *vec,
unsigned long vlen, u32 pos_low, u32 pos_high);
asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
const struct compat_iovec __user *vec,
unsigned long vlen, u32 pos_low, u32 pos_high);
int compat_do_execve(char * filename, compat_uptr_t __user *argv,
compat_uptr_t __user *envp, struct pt_regs * regs);
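The split pos_low/pos_high arguments exist because the 32-bit ABI cannot pass a 64-bit file offset directly; a hedged sketch (the helper name is illustrative) of how the offset is typically rebuilt:

/* Sketch: recombine the compat offset halves into a loff_t. */
static inline loff_t example_compat_pos(u32 pos_low, u32 pos_high)
{
	return ((loff_t)pos_high << 32) | pos_low;
}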

View File

@@ -3,8 +3,10 @@
#endif
/* GCC 4.1.[01] miscompiles __weak */
#if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
# error Your version of gcc miscompiles the __weak directive
#ifdef __KERNEL__
# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
# error Your version of gcc miscompiles the __weak directive
# endif
#endif
#define __used __attribute__((__used__))

View File

@@ -68,6 +68,7 @@ struct ftrace_branch_data {
unsigned long miss;
unsigned long hit;
};
unsigned long miss_hit[2];
};
};
@@ -125,10 +126,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
.line = __LINE__, \
}; \
______r = !!(cond); \
if (______r) \
______f.hit++; \
else \
______f.miss++; \
______f.miss_hit[______r]++; \
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
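The hunk above replaces the hit/miss if/else with a single increment of miss_hit[______r]; a small stand-alone illustration (names are made up) of the same indexing trick:

/* Sketch: !!cond normalizes to 0 or 1, selecting the miss or hit slot. */
static unsigned long example_miss_hit[2];

static void example_count_branch(int cond)
{
	example_miss_hit[!!cond]++;	/* [0] = miss, [1] = hit */
}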

View File

@@ -39,8 +39,10 @@
#define CN_IDX_V86D 0x4
#define CN_VAL_V86D_UVESAFB 0x1
#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */
#define CN_DST_IDX 0x6
#define CN_DST_VAL 0x1
#define CN_NETLINK_USERS 6
#define CN_NETLINK_USERS 7
/*
* Maximum connector's message size.
@@ -109,6 +111,12 @@ struct cn_queue_dev {
unsigned char name[CN_CBQ_NAMELEN];
struct workqueue_struct *cn_queue;
/* Sent to kevent to create cn_queue only when needed */
struct work_struct wq_creation;
/* Tell if the wq_creation job is pending/completed */
atomic_t wq_requested;
/* Wait for cn_queue to be created */
wait_queue_head_t wq_created;
struct list_head queue_list;
spinlock_t queue_lock;
@@ -164,6 +172,8 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);

View File

@@ -137,8 +137,8 @@ extern void resume_console(void);
int mda_console_init(void);
void prom_con_init(void);
void vcs_make_sysfs(struct tty_struct *tty);
void vcs_remove_sysfs(struct tty_struct *tty);
void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
/* Some debug stub to catch some of the obvious races in the VT code */
#if 1

View File

@@ -23,7 +23,6 @@
#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
struct cpu {
int node_id; /* The node which contains the CPU */
@@ -103,16 +102,6 @@ extern struct sysdev_class cpu_sysdev_class;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
{
mutex_lock(cpu_hp_mutex);
}
static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
{
mutex_unlock(cpu_hp_mutex);
}
extern void get_online_cpus(void);
extern void put_online_cpus(void);
#define hotcpu_notifier(fn, pri) { \
@@ -126,11 +115,6 @@ int cpu_down(unsigned int cpu);
#else /* CONFIG_HOTPLUG_CPU */
static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
{ }
static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
{ }
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)

View File

@@ -12,6 +12,7 @@
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#ifdef CONFIG_CPUSETS
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
return number_of_cpusets <= 1 ||
__cpuset_zone_allowed_softwall(z, gfp_mask);
__cpuset_node_allowed_softwall(node, gfp_mask);
}
static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
return number_of_cpusets <= 1 ||
__cpuset_zone_allowed_hardwall(z, gfp_mask);
__cpuset_node_allowed_hardwall(node, gfp_mask);
}
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}
static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
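A hedged sketch (not part of the diff) of how an allocation path might consult the helpers above; the function and the hardwall flag are illustrative only:

/* Sketch: is this zone usable under the current task's cpuset? */
static int example_zone_usable(struct zone *z, gfp_t gfp_mask, int hardwall)
{
	if (hardwall)
		return cpuset_zone_allowed_hardwall(z, gfp_mask);
	return cpuset_zone_allowed_softwall(z, gfp_mask);
}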
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -90,12 +101,12 @@ static inline void cpuset_init_smp(void) {}
static inline void cpuset_cpus_allowed(struct task_struct *p,
struct cpumask *mask)
{
*mask = cpu_possible_map;
cpumask_copy(mask, cpu_possible_mask);
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
struct cpumask *mask)
{
*mask = cpu_possible_map;
cpumask_copy(mask, cpu_possible_mask);
}
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
return 1;
}
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
return 1;
}
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
return 1;
}
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
return 1;

View File

@@ -40,6 +40,7 @@
#define CRYPTO_ALG_TYPE_SHASH 0x00000009
#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
@@ -548,9 +549,6 @@ struct crypto_attr_u32 {
* Transform user interface.
*/
struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask);
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

View File

@@ -82,9 +82,9 @@ struct cyclades_monitor {
* open)
*/
struct cyclades_idle_stats {
time_t in_use; /* Time device has been in use (secs) */
time_t recv_idle; /* Time since last char received (secs) */
time_t xmit_idle; /* Time since last char transmitted (secs) */
__kernel_time_t in_use; /* Time device has been in use (secs) */
__kernel_time_t recv_idle; /* Time since last char received (secs) */
__kernel_time_t xmit_idle; /* Time since last char transmitted (secs) */
unsigned long recv_bytes; /* Bytes received */
unsigned long xmit_bytes; /* Bytes transmitted */
unsigned long overruns; /* Input overruns */

View File

@@ -1,3 +1,23 @@
/*
* Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef DCA_H
#define DCA_H
/* DCA Provider API */

View File

@@ -112,7 +112,7 @@ struct dentry {
struct list_head d_subdirs; /* our children */
struct list_head d_alias; /* inode alias list */
unsigned long d_time; /* used by d_revalidate */
struct dentry_operations *d_op;
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
void *d_fsdata; /* fs-specific data */

View File

@@ -355,46 +355,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
return __dccp_hdr_len(dccp_hdr(skb));
}
/* initial values for each feature */
#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
#define DCCPF_INITIAL_ACK_RATIO 2
#define DCCPF_INITIAL_CCID DCCPC_CCID2
/* FIXME: for now we default to 1 but it should really be 0 */
#define DCCPF_INITIAL_SEND_NDP_COUNT 1
/**
* struct dccp_minisock - Minimal DCCP connection representation
*
* Will be used to pass the state from dccp_request_sock to dccp_sock.
*
* @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
* @dccpms_pending - List of features being negotiated
* @dccpms_conf -
*/
struct dccp_minisock {
__u64 dccpms_sequence_window;
struct list_head dccpms_pending;
struct list_head dccpms_conf;
};
struct dccp_opt_conf {
__u8 *dccpoc_val;
__u8 dccpoc_len;
};
struct dccp_opt_pend {
struct list_head dccpop_node;
__u8 dccpop_type;
__u8 dccpop_feat;
__u8 *dccpop_val;
__u8 dccpop_len;
int dccpop_conf;
struct dccp_opt_conf *dccpop_sc;
};
extern void dccp_minisock_init(struct dccp_minisock *dmsk);
/**
* struct dccp_request_sock - represent DCCP-specific connection request
* @dreq_inet_rsk: structure inherited from
@@ -483,13 +443,14 @@ struct dccp_ackvec;
* @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
* @dccps_l_ack_ratio - feature-local Ack Ratio
* @dccps_r_ack_ratio - feature-remote Ack Ratio
* @dccps_l_seq_win - local Sequence Window (influences ack number validity)
* @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
* @dccps_pcslen - sender partial checksum coverage (via sockopt)
* @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
* @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
* @dccps_ndp_count - number of Non Data Packets since last data packet
* @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
* @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
* @dccps_minisock - associated minisock (accessed via dccp_msk)
* @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
* @dccps_hc_rx_ackvec - rx half connection ack vector
* @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
@@ -523,12 +484,13 @@ struct dccp_sock {
__u32 dccps_timestamp_time;
__u16 dccps_l_ack_ratio;
__u16 dccps_r_ack_ratio;
__u64 dccps_l_seq_win:48;
__u64 dccps_r_seq_win:48;
__u8 dccps_pcslen:4;
__u8 dccps_pcrlen:4;
__u8 dccps_send_ndp_count:1;
__u64 dccps_ndp_count:48;
unsigned long dccps_rate_last;
struct dccp_minisock dccps_minisock;
struct list_head dccps_featneg;
struct dccp_ackvec *dccps_hc_rx_ackvec;
struct ccid *dccps_hc_rx_ccid;
@@ -546,11 +508,6 @@ static inline struct dccp_sock *dccp_sk(const struct sock *sk)
return (struct dccp_sock *)sk;
}
static inline struct dccp_minisock *dccp_msk(const struct sock *sk)
{
return (struct dccp_minisock *)&dccp_sk(sk)->dccps_minisock;
}
static inline const char *dccp_role(const struct sock *sk)
{
switch (dccp_sk(sk)->dccps_role) {

View File

@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
struct dentry *debugfs_create_blob(const char *name, mode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
bool debugfs_initialized(void);
#else
#include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
return ERR_PTR(-ENODEV);
}
static inline bool debugfs_initialized(void)
{
return false;
}
#endif
#endif
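A hedged usage sketch (not part of the diff) of the new debugfs_initialized() helper; the directory name and the static dentry are illustrative:

/* Sketch: only create debugfs entries once debugfs is known to be up. */
static struct dentry *example_dir;

static void example_debugfs_setup(void)
{
	if (!debugfs_initialized())
		return;
	example_dir = debugfs_create_dir("example", NULL);
}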

View File

@@ -139,6 +139,9 @@ struct target_type {
dm_ioctl_fn ioctl;
dm_merge_fn merge;
dm_busy_fn busy;
/* For internal device-mapper use. */
struct list_head list;
};
struct io_restrictions {

View File

@@ -28,6 +28,7 @@
#define BUS_ID_SIZE 20
struct device;
struct device_private;
struct device_driver;
struct driver_private;
struct class;
@@ -147,7 +148,7 @@ extern void put_driver(struct device_driver *drv);
extern struct device_driver *driver_find(const char *name,
struct bus_type *bus);
extern int driver_probe_done(void);
extern int wait_for_device_probe(void);
extern void wait_for_device_probe(void);
/* sysfs interface for exporting driver attributes */
@@ -367,15 +368,11 @@ struct device_dma_parameters {
};
struct device {
struct klist klist_children;
struct klist_node knode_parent; /* node in sibling list */
struct klist_node knode_driver;
struct klist_node knode_bus;
struct device *parent;
struct device_private *p;
struct kobject kobj;
char bus_id[BUS_ID_SIZE]; /* position on parent bus */
unsigned uevent_suppress:1;
const char *init_name; /* initial name of the device */
struct device_type *type;
@@ -387,8 +384,13 @@ struct device {
struct device_driver *driver; /* which driver has allocated this
device */
void *driver_data; /* data private to the driver */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *platform_data; /* We will remove platform_data
field if all platform devices
pass its platform specific data
from platform_device->platform_data,
other kind of devices should not
use platform_data. */
struct dev_pm_info power;
#ifdef CONFIG_NUMA
@@ -427,8 +429,7 @@ struct device {
static inline const char *dev_name(const struct device *dev)
{
/* will be changed into kobject_name(&dev->kobj) in the near future */
return dev->bus_id;
return kobject_name(&dev->kobj);
}
extern int dev_set_name(struct device *dev, const char *name, ...)
@@ -463,6 +464,16 @@ static inline void dev_set_drvdata(struct device *dev, void *data)
dev->driver_data = data;
}
static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
{
return dev->kobj.uevent_suppress;
}
static inline void dev_set_uevent_suppress(struct device *dev, int val)
{
dev->kobj.uevent_suppress = val;
}
static inline int device_is_registered(struct device *dev)
{
return dev->kobj.state_in_sysfs;
@@ -483,7 +494,8 @@ extern int device_for_each_child(struct device *dev, void *data,
extern struct device *device_find_child(struct device *dev, void *data,
int (*match)(struct device *dev, void *data));
extern int device_rename(struct device *dev, char *new_name);
extern int device_move(struct device *dev, struct device *new_parent);
extern int device_move(struct device *dev, struct device *new_parent,
enum dpm_order dpm_order);
/*
* Root device objects for grouping under /sys/devices
@@ -570,7 +582,7 @@ extern const char *dev_driver_string(const struct device *dev);
#if defined(DEBUG)
#define dev_dbg(dev, format, arg...) \
dev_printk(KERN_DEBUG , dev , format , ## arg)
#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define dev_dbg(dev, format, ...) do { \
dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
} while (0)

View File

@@ -9,6 +9,8 @@
#ifndef _DLM_NETLINK_H
#define _DLM_NETLINK_H
#include <linux/types.h>
enum {
DLM_STATUS_WAITING = 1,
DLM_STATUS_GRANTED = 2,
@@ -18,16 +20,16 @@ enum {
#define DLM_LOCK_DATA_VERSION 1
struct dlm_lock_data {
uint16_t version;
uint32_t lockspace_id;
__u16 version;
__u32 lockspace_id;
int nodeid;
int ownpid;
uint32_t id;
uint32_t remid;
uint64_t xid;
int8_t status;
int8_t grmode;
int8_t rqmode;
__u32 id;
__u32 remid;
__u64 xid;
__s8 status;
__s8 grmode;
__s8 rqmode;
unsigned long timestamp;
int resource_namelen;
char resource_name[DLM_RESNAME_MAXLEN];

View File

@@ -28,6 +28,9 @@ struct dm_dirty_log_type {
const char *name;
struct module *module;
/* For internal device-mapper use */
struct list_head list;
int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
unsigned argc, char **argv);
void (*dtr)(struct dm_dirty_log *log);
@@ -113,6 +116,16 @@ struct dm_dirty_log_type {
*/
int (*status)(struct dm_dirty_log *log, status_type_t status_type,
char *result, unsigned maxlen);
/*
* is_remote_recovering is necessary for cluster mirroring. It provides
* a way to detect recovery on another node, so we aren't writing
* concurrently. This function is likely to block (when a cluster log
* is used).
*
* Returns: 0, 1
*/
int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
};
int dm_dirty_log_type_register(struct dm_dirty_log_type *type);

View File

@@ -113,20 +113,20 @@ struct dm_ioctl {
* return -ENOTTY) fill out this field, even if the
* command failed.
*/
uint32_t version[3]; /* in/out */
uint32_t data_size; /* total size of data passed in
__u32 version[3]; /* in/out */
__u32 data_size; /* total size of data passed in
* including this struct */
uint32_t data_start; /* offset to start of data
__u32 data_start; /* offset to start of data
* relative to start of this struct */
uint32_t target_count; /* in/out */
int32_t open_count; /* out */
uint32_t flags; /* in/out */
uint32_t event_nr; /* in/out */
uint32_t padding;
__u32 target_count; /* in/out */
__s32 open_count; /* out */
__u32 flags; /* in/out */
__u32 event_nr; /* in/out */
__u32 padding;
uint64_t dev; /* in/out */
__u64 dev; /* in/out */
char name[DM_NAME_LEN]; /* device name */
char uuid[DM_UUID_LEN]; /* unique identifier for
@@ -139,9 +139,9 @@ struct dm_ioctl {
* dm_ioctl.
*/
struct dm_target_spec {
uint64_t sector_start;
uint64_t length;
int32_t status; /* used when reading from kernel only */
__u64 sector_start;
__u64 length;
__s32 status; /* used when reading from kernel only */
/*
* Location of the next dm_target_spec.
@@ -153,7 +153,7 @@ struct dm_target_spec {
* (that follows the dm_ioctl struct) to the start of the "next"
* dm_target_spec.
*/
uint32_t next;
__u32 next;
char target_type[DM_MAX_TYPE_NAME];
@@ -168,17 +168,17 @@ struct dm_target_spec {
* Used to retrieve the target dependencies.
*/
struct dm_target_deps {
uint32_t count; /* Array size */
uint32_t padding; /* unused */
uint64_t dev[0]; /* out */
__u32 count; /* Array size */
__u32 padding; /* unused */
__u64 dev[0]; /* out */
};
/*
* Used to get a list of all dm devices.
*/
struct dm_name_list {
uint64_t dev;
uint32_t next; /* offset to the next record from
__u64 dev;
__u32 next; /* offset to the next record from
the _start_ of this */
char name[0];
};
@@ -187,8 +187,8 @@ struct dm_name_list {
* Used to retrieve the target versions
*/
struct dm_target_versions {
uint32_t next;
uint32_t version[3];
__u32 next;
__u32 version[3];
char name[0];
};
@@ -197,7 +197,7 @@ struct dm_target_versions {
* Used to pass message to a target
*/
struct dm_target_msg {
uint64_t sector; /* Device sector */
__u64 sector; /* Device sector */
char message[0];
};

include/linux/dma-debug.h (new file, 174 lines)
View File

@@ -0,0 +1,174 @@
/*
* Copyright (C) 2008 Advanced Micro Devices, Inc.
*
* Author: Joerg Roedel <joerg.roedel@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __DMA_DEBUG_H
#define __DMA_DEBUG_H
#include <linux/types.h>
struct device;
struct scatterlist;
struct bus_type;
#ifdef CONFIG_DMA_API_DEBUG
extern void dma_debug_add_bus(struct bus_type *bus);
extern void dma_debug_init(u32 num_entries);
extern void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
bool map_single);
extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction, bool map_single);
extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int mapped_ents, int direction);
extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir);
extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t dma_addr, void *virt);
extern void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr);
extern void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
int direction);
extern void debug_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction);
extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction);
extern void debug_dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size, int direction);
extern void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
extern void debug_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg,
int nelems, int direction);
extern void debug_dma_dump_mappings(struct device *dev);
#else /* CONFIG_DMA_API_DEBUG */
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void dma_debug_init(u32 num_entries)
{
}
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
bool map_single)
{
}
static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction,
bool map_single)
{
}
static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int mapped_ents, int direction)
{
}
static inline void debug_dma_unmap_sg(struct device *dev,
struct scatterlist *sglist,
int nelems, int dir)
{
}
static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t dma_addr, void *virt)
{
}
static inline void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr)
{
}
static inline void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
{
}
static inline void debug_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
{
}
static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction)
{
}
static inline void debug_dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
int direction)
{
}
static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg,
int nelems, int direction)
{
}
static inline void debug_dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg,
int nelems, int direction)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */
#endif /* __DMA_DEBUG_H */

View File

@@ -3,6 +3,8 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/scatterlist.h>
/* These definitions mirror those in pci.h, so they can be used
* interchangeably with their PCI_ counterparts */
@@ -13,6 +15,52 @@ enum dma_data_direction {
DMA_NONE = 3,
};
struct dma_map_ops {
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs);
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*unmap_sg)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
struct dma_attrs *attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
void (*sync_single_for_device)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
void (*sync_single_range_for_cpu)(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction dir);
void (*sync_single_range_for_device)(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction dir);
void (*sync_sg_for_cpu)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
void (*sync_sg_for_device)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
int is_phys;
};
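A hedged sketch (not part of the diff) of a partially filled dma_map_ops table for an imaginary 1:1-mapped bus; the allocation strategy and the physical-address assumption are illustrative, and a real implementation would also provide map_page/map_sg and the sync hooks:

static void *example_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *cpu_addr = (void *)__get_free_pages(gfp, get_order(size));

	if (cpu_addr)
		*dma_handle = virt_to_phys(cpu_addr);	/* assumes 1:1 mapping */
	return cpu_addr;
}

static void example_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

static struct dma_map_ops example_dma_ops = {
	.alloc_coherent	= example_alloc_coherent,
	.free_coherent	= example_free_coherent,
	.is_phys	= 1,
};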
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
/*

View File

@@ -11,6 +11,7 @@
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_SNP (1 << 11)
struct intel_iommu;
struct dmar_domain;

View File

@@ -23,9 +23,6 @@
#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>
/**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
/**
* struct dma_device - info on the entity supplying DMA services
* @chancnt: how many DMA channels are supported
* @privatecnt: how many DMA channels are requested by dma_request_channel
* @channels: the list of struct dma_chan
* @global_node: list_head for global dma_device_list
* @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
struct dma_device {
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
}
#endif
#ifdef CONFIG_ASYNC_TX_DMA
#define async_dmaengine_get() dmaengine_get()
#define async_dmaengine_put() dmaengine_put()
#define async_dma_find_channel(type) dma_find_channel(type)
#else
static inline void async_dmaengine_get(void)
{
}
static inline void async_dmaengine_put(void)
{
}
static inline struct dma_chan *
async_dma_find_channel(enum dma_transaction_type type)
{
return NULL;
}
#endif
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
set_bit(tx_type, dstp->bits);
}
#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
static inline void
__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
clear_bit(tx_type, dstp->bits);
}
#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
{

View File

@@ -24,10 +24,10 @@
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/msi.h>
#include <linux/irqreturn.h>
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct intel_iommu;
#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
struct dmar_drhd_unit {
struct list_head list; /* list of drhd units */
struct acpi_dmar_header *hdr; /* ACPI header */
@@ -49,7 +49,7 @@ extern int dmar_dev_scope_init(void);
/* Intel IOMMU detection */
extern void detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
extern int parse_ioapics_under_ir(void);
extern int alloc_iommu(struct dmar_drhd_unit *);
@@ -63,12 +63,12 @@ static inline int dmar_table_init(void)
{
return -ENODEV;
}
static inline int enable_drhd_fault_handling(void)
{
return -1;
}
#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
extern int enable_intr_remapping(int);
struct irte {
union {
struct {
@@ -97,6 +97,10 @@ struct irte {
__u64 high;
};
};
#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
extern int enable_intr_remapping(int);
extern int get_irte(int irq, struct irte *entry);
extern int modify_irte(int irq, struct irte *irte_modified);
extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
@@ -111,14 +115,40 @@ extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic);
#else
static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
return -1;
}
static inline int modify_irte(int irq, struct irte *irte_modified)
{
return -1;
}
static inline int free_irte(int irq)
{
return -1;
}
static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
return -1;
}
static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
u16 sub_handle)
{
return -1;
}
static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
return NULL;
}
static inline struct intel_iommu *map_ioapic_to_ir(int apic)
{
return NULL;
}
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1)
#define intr_remapping_enabled (0)
#endif
#ifdef CONFIG_DMAR
extern const char *dmar_get_fault_reason(u8 fault_reason);
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
@@ -127,8 +157,10 @@ extern void dmar_msi_mask(unsigned int irq);
extern void dmar_msi_read(int irq, struct msi_msg *msg);
extern void dmar_msi_write(int irq, struct msi_msg *msg);
extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);
#ifdef CONFIG_DMAR
extern int iommu_detected, no_iommu;
extern struct list_head dmar_rmrr_units;
struct dmar_rmrr_unit {

View File

@@ -47,7 +47,8 @@ extern int dmi_get_year(int field);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_name_in_serial(const char *str);
extern int dmi_available;
extern int dmi_walk(void (*decode)(const struct dmi_header *));
extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data);
extern bool dmi_match(enum dmi_field f, const char *str);
#else
@@ -61,8 +62,8 @@ static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
static inline int dmi_name_in_serial(const char *s) { return 0; }
#define dmi_available 0
static inline int dmi_walk(void (*decode)(const struct dmi_header *))
{ return -1; }
static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data) { return -1; }
static inline bool dmi_match(enum dmi_field f, const char *str)
{ return false; }
static inline const struct dmi_system_id *
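A hedged sketch (not part of the diff) of a decode callback written against the new two-argument dmi_walk() prototype; the DMI type and the counting logic are illustrative only:

/* Sketch: count DMI "memory device" (type 17) records. */
static void example_decode(const struct dmi_header *dh, void *private_data)
{
	int *count = private_data;

	if (dh->type == 17)
		(*count)++;
}

static int example_count_dimms(void)
{
	int count = 0;

	if (dmi_walk(example_decode, &count) < 0)
		return -ENODEV;
	return count;
}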

View File

@@ -1,12 +0,0 @@
/* platform data for the DS1WM driver */
struct ds1wm_platform_data {
int bus_shift; /* number of shifts needed to calculate the
* offset between DS1WM registers;
* e.g. on h5xxx and h2200 this is 2
* (registers aligned to 4-byte boundaries),
* while on hx4700 this is 1 */
int active_high;
void (*enable)(struct platform_device *pdev);
void (*disable)(struct platform_device *pdev);
};

include/linux/dst.h (new file, 587 lines)
View File

@@ -0,0 +1,587 @@
/*
* 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __DST_H
#define __DST_H
#include <linux/types.h>
#include <linux/connector.h>
#define DST_NAMELEN 32
#define DST_NAME "dst"
enum {
/* Remove node with given id from storage */
DST_DEL_NODE = 0,
/* Add remote node with given id to the storage */
DST_ADD_REMOTE,
/* Add local node with given id to the storage to be exported and used by remote peers */
DST_ADD_EXPORT,
/* Crypto initialization command (hash/cipher used to protect the connection) */
DST_CRYPTO,
/* Security attributes for given connection (permissions for example) */
DST_SECURITY,
/* Register given node in the block layer subsystem */
DST_START,
DST_CMD_MAX
};
struct dst_ctl
{
/* Storage name */
char name[DST_NAMELEN];
/* Command flags */
__u32 flags;
/* Command itself (see above) */
__u32 cmd;
/* Maximum number of pages per single request in this device */
__u32 max_pages;
/* Stale/error transaction scanning timeout in milliseconds */
__u32 trans_scan_timeout;
/* Maximum number of retry sends before completing transaction as broken */
__u32 trans_max_retries;
/* Storage size */
__u64 size;
};
/* Reply command carries completion status */
struct dst_ctl_ack
{
struct cn_msg msg;
int error;
int unused[3];
};
/*
 * Unfortunately the socket address structure is not exported to userspace
* and is redefined there.
*/
#define SADDR_MAX_DATA 128
struct saddr {
/* address family, AF_xxx */
unsigned short sa_family;
/* 14 bytes of protocol address */
char sa_data[SADDR_MAX_DATA];
/* Number of bytes used in sa_data */
unsigned short sa_data_len;
};
/* Address structure */
struct dst_network_ctl
{
/* Socket type: datagram, stream...*/
unsigned int type;
/* Let me guess, is it a Jupiter diameter? */
unsigned int proto;
/* Peer's address */
struct saddr addr;
};
struct dst_crypto_ctl
{
/* Cipher and hash names */
char cipher_algo[DST_NAMELEN];
char hash_algo[DST_NAMELEN];
/* Key sizes. Can be zero for digest for example */
unsigned int cipher_keysize, hash_keysize;
/* Alignment. Calculated by the DST itself. */
unsigned int crypto_attached_size;
/* Number of threads to perform crypto operations */
int thread_num;
};
/* Export security attributes have these bits checked when a client connects */
#define DST_PERM_READ (1<<0)
#define DST_PERM_WRITE (1<<1)
/*
 * Right now it is a simple model, where each remote address
 * is assigned a set of permissions it is allowed to perform.
 * In the real world a block device does not know anything but
 * reading and writing, so it should be more than enough.
*/
struct dst_secure_user
{
unsigned int permissions;
struct saddr addr;
};
/*
* Export control command: device to export and network address to accept
* clients to work with given device
*/
struct dst_export_ctl
{
char device[DST_NAMELEN];
struct dst_network_ctl ctl;
};
enum {
DST_CFG = 1, /* Request remote configuration */
DST_IO, /* IO command */
DST_IO_RESPONSE, /* IO response */
DST_PING, /* Keepalive message */
DST_NCMD_MAX,
};
struct dst_cmd
{
/* Network command itself, see above */
__u32 cmd;
/*
* Size of the attached data
* (in most cases, for READ command it means how many bytes were requested)
*/
__u32 size;
/* Crypto size: number of attached bytes with digest/hmac */
__u32 csize;
/* Here we can carry secret data */
__u32 reserved;
/* Read/write bits, see how they are encoded in bio structure */
__u64 rw;
/* BIO flags */
__u64 flags;
/* Unique command id (like transaction ID) */
__u64 id;
/* Sector to start IO from */
__u64 sector;
/* Hash data is placed after this header */
__u8 hash[0];
};
/*
* Convert command to/from network byte order.
* We do not use hton*() functions, since there is
* no 64-bit implementation.
*/
static inline void dst_convert_cmd(struct dst_cmd *c)
{
c->cmd = __cpu_to_be32(c->cmd);
c->csize = __cpu_to_be32(c->csize);
c->size = __cpu_to_be32(c->size);
c->sector = __cpu_to_be64(c->sector);
c->id = __cpu_to_be64(c->id);
c->flags = __cpu_to_be64(c->flags);
c->rw = __cpu_to_be64(c->rw);
}
/* Transaction id */
typedef __u64 dst_gen_t;
#ifdef __KERNEL__
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/device.h>
#include <linux/mempool.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/rbtree.h>
#ifdef CONFIG_DST_DEBUG
#define dprintk(f, a...) printk(KERN_NOTICE f, ##a)
#else
static inline void __attribute__ ((format (printf, 1, 2)))
dprintk(const char *fmt, ...) {}
#endif
struct dst_node;
struct dst_trans
{
/* DST node we are working with */
struct dst_node *n;
/* Entry inside transaction tree */
struct rb_node trans_entry;
/* Merlin kills this transaction when this memory cell equals zero */
atomic_t refcnt;
/* How this transaction should be processed by crypto engine */
short enc;
/* How many times this transaction was resent */
short retries;
/* Completion status */
int error;
/* When did we send it to the remote peer */
long send_time;
/* My name is...
 * Well, computers do not speak, they have a unique id instead */
dst_gen_t gen;
/* Block IO we are working with */
struct bio *bio;
/* Network command for above block IO request */
struct dst_cmd cmd;
};
struct dst_crypto_engine
{
/* What should we do with all block requests */
struct crypto_hash *hash;
struct crypto_ablkcipher *cipher;
/* Pool of pages used to encrypt data into before sending */
int page_num;
struct page **pages;
/* What to do with current request */
int enc;
/* Who we are and where do we go */
struct scatterlist *src, *dst;
/* Maximum timeout waiting for encryption to be completed */
long timeout;
/* IV is a 64-bit sequential counter */
u64 iv;
/* Secret data */
void *private;
/* Cached temporary data lives here */
int size;
void *data;
};
struct dst_state
{
/* The main state protection */
struct mutex state_lock;
/* Polling machinery for sockets */
wait_queue_t wait;
wait_queue_head_t *whead;
/* Most of events are being waited here */
wait_queue_head_t thread_wait;
/* Who owns this? */
struct dst_node *node;
/* Network address for this state */
struct dst_network_ctl ctl;
/* Permissions to work with: read-only or rw connection */
u32 permissions;
/* Called when we need to clean private data */
void (* cleanup)(struct dst_state *st);
/* Used by the server: BIO completion queues BIOs here */
struct list_head request_list;
spinlock_t request_lock;
/* Guess what? No, it is not the number of planets */
atomic_t refcnt;
/* This flag is set when the connection should be dropped */
int need_exit;
/*
* Socket to work with. Second pointer is used for
* lockless check if socket was changed before performing
* next action (like working with cached polling result)
*/
struct socket *socket, *read_socket;
/* Cached preallocated data */
void *data;
unsigned int size;
/* Currently processed command */
struct dst_cmd cmd;
};
struct dst_info
{
/* Device size */
u64 size;
/* Local device name for export devices */
char local[DST_NAMELEN];
/* Network setup */
struct dst_network_ctl net;
/* Sysfs bits use this */
struct device device;
};
struct dst_node
{
struct list_head node_entry;
/* Hi, my name is stored here */
char name[DST_NAMELEN];
/* My cache name is stored here */
char cache_name[DST_NAMELEN];
/* Block device attached to given node.
* Only valid for exporting nodes */
struct block_device *bdev;
/* Network state machine for given peer */
struct dst_state *state;
/* Block IO machinery */
struct request_queue *queue;
struct gendisk *disk;
/* Number of threads in processing pool */
int thread_num;
/* Maximum number of pages in single IO */
int max_pages;
/* I'm that big in bytes */
loff_t size;
/* Exported to userspace node information */
struct dst_info *info;
/*
* Security attribute list.
* Used only by exporting node currently.
*/
struct list_head security_list;
struct mutex security_lock;
/*
 * When this underflows below zero, university collapses.
 * But this will not happen, since the node will be freed
 * when the reference counter reaches zero.
*/
atomic_t refcnt;
/* How precisely should I be started? */
int (*start)(struct dst_node *);
/* Crypto capabilities */
struct dst_crypto_ctl crypto;
u8 *hash_key;
u8 *cipher_key;
/* Pool of processing thread */
struct thread_pool *pool;
/* Transaction IDs live here */
atomic_long_t gen;
/*
* How frequently and how many times transaction
* tree should be scanned to drop stale objects.
*/
long trans_scan_timeout;
int trans_max_retries;
/* Small gnomes live here */
struct rb_root trans_root;
struct mutex trans_lock;
/*
* Transaction cache/memory pool.
* It is big enough to contain not only transaction
* itself, but additional crypto data (digest/hmac).
*/
struct kmem_cache *trans_cache;
mempool_t *trans_pool;
/* This entity scans transaction tree */
struct delayed_work trans_work;
wait_queue_head_t wait;
};
/* Kernel representation of the security attribute */
struct dst_secure
{
struct list_head sec_entry;
struct dst_secure_user sec;
};
int dst_process_bio(struct dst_node *n, struct bio *bio);
int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r);
int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le);
static inline struct dst_state *dst_state_get(struct dst_state *st)
{
BUG_ON(atomic_read(&st->refcnt) == 0);
atomic_inc(&st->refcnt);
return st;
}
void dst_state_put(struct dst_state *st);
struct dst_state *dst_state_alloc(struct dst_node *n);
int dst_state_socket_create(struct dst_state *st);
void dst_state_socket_release(struct dst_state *st);
void dst_state_exit_connected(struct dst_state *st);
int dst_state_schedule_receiver(struct dst_state *st);
void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str);
static inline void dst_state_lock(struct dst_state *st)
{
mutex_lock(&st->state_lock);
}
static inline void dst_state_unlock(struct dst_state *st)
{
mutex_unlock(&st->state_lock);
}
void dst_poll_exit(struct dst_state *st);
int dst_poll_init(struct dst_state *st);
static inline unsigned int dst_state_poll(struct dst_state *st)
{
unsigned int revents = POLLHUP | POLLERR;
dst_state_lock(st);
if (st->socket)
revents = st->socket->ops->poll(NULL, st->socket, NULL);
dst_state_unlock(st);
return revents;
}
static inline int dst_thread_setup(void *private, void *data)
{
return 0;
}
void dst_node_put(struct dst_node *n);
static inline struct dst_node *dst_node_get(struct dst_node *n)
{
atomic_inc(&n->refcnt);
return n;
}
int dst_data_recv(struct dst_state *st, void *data, unsigned int size);
int dst_recv_cdata(struct dst_state *st, void *cdata);
int dst_data_send_header(struct socket *sock,
void *data, unsigned int size, int more);
int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio);
int dst_process_io(struct dst_state *st);
int dst_export_crypto(struct dst_node *n, struct bio *bio);
int dst_export_send_bio(struct bio *bio);
int dst_start_export(struct dst_node *n);
int __init dst_export_init(void);
void dst_export_exit(void);
/* Private structure for export block IO requests */
struct dst_export_priv
{
struct list_head request_entry;
struct dst_state *state;
struct bio *bio;
struct dst_cmd cmd;
};
static inline void dst_trans_get(struct dst_trans *t)
{
atomic_inc(&t->refcnt);
}
struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen);
int dst_trans_remove(struct dst_trans *t);
int dst_trans_remove_nolock(struct dst_trans *t);
void dst_trans_put(struct dst_trans *t);
/*
* Convert bio into network command.
*/
static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd,
u32 command, u64 id)
{
cmd->cmd = command;
cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS;
cmd->rw = bio->bi_rw;
cmd->size = bio->bi_size;
cmd->csize = 0;
cmd->id = id;
cmd->sector = bio->bi_sector;
};
int dst_trans_send(struct dst_trans *t);
int dst_trans_crypto(struct dst_trans *t);
int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl);
void dst_node_crypto_exit(struct dst_node *n);
static inline int dst_need_crypto(struct dst_node *n)
{
struct dst_crypto_ctl *c = &n->crypto;
/*
* Logical OR is appropriate here, but boolean one produces
* more optimal code, so it is used instead.
*/
return (c->hash_algo[0] | c->cipher_algo[0]);
}
int dst_node_trans_init(struct dst_node *n, unsigned int size);
void dst_node_trans_exit(struct dst_node *n);
/*
* Pool of threads.
* Ready list contains threads currently free to be used,
* active one contains threads with some work scheduled for them.
* Caller can wait in given queue when thread is ready.
*/
struct thread_pool
{
int thread_num;
struct mutex thread_lock;
struct list_head ready_list, active_list;
wait_queue_head_t wait;
};
void thread_pool_del_worker(struct thread_pool *p);
void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id);
int thread_pool_add_worker(struct thread_pool *p,
char *name,
unsigned int id,
void *(* init)(void *data),
void (* cleanup)(void *data),
void *data);
void thread_pool_destroy(struct thread_pool *p);
struct thread_pool *thread_pool_create(int num, char *name,
void *(* init)(void *data),
void (* cleanup)(void *data),
void *data);
int thread_pool_schedule(struct thread_pool *p,
int (* setup)(void *stored_private, void *setup_data),
int (* action)(void *stored_private, void *setup_data),
void *setup_data, long timeout);
int thread_pool_schedule_private(struct thread_pool *p,
int (* setup)(void *private, void *data),
int (* action)(void *private, void *data),
void *data, long timeout, void *id);
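A hedged sketch (not part of the diff) of driving the thread pool API declared above; the worker count, the callbacks and the failure convention are assumptions:

static void *example_worker_init(void *data)
{
	return data;		/* becomes the per-thread private pointer */
}

static void example_worker_cleanup(void *data)
{
}

static int example_action(void *stored_private, void *setup_data)
{
	return 0;		/* a real action would do crypto or network IO */
}

static int example_use_pool(void)
{
	struct thread_pool *p = thread_pool_create(2, "example",
			example_worker_init, example_worker_cleanup, NULL);

	if (IS_ERR_OR_NULL(p))	/* failure convention assumed */
		return -ENOMEM;

	thread_pool_schedule(p, dst_thread_setup, example_action,
			     NULL, MAX_SCHEDULE_TIMEOUT);
	thread_pool_destroy(p);
	return 0;
}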
#endif /* __KERNEL__ */
#endif /* __DST_H */

View File

@@ -76,7 +76,7 @@ struct audio_karaoke{ /* if Vocal1 or Vocal2 are non-zero, they get mixed */
} audio_karaoke_t; /* into left and right */
typedef uint16_t audio_attributes_t;
typedef __u16 audio_attributes_t;
/* bits: descr. */
/* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
/* 12 multichannel extension */

View File

@@ -132,12 +132,12 @@ struct video_command {
#define VIDEO_VSYNC_FIELD_PROGRESSIVE (3)
struct video_event {
int32_t type;
__s32 type;
#define VIDEO_EVENT_SIZE_CHANGED 1
#define VIDEO_EVENT_FRAME_RATE_CHANGED 2
#define VIDEO_EVENT_DECODER_STOPPED 3
#define VIDEO_EVENT_VSYNC 4
time_t timestamp;
__kernel_time_t timestamp;
union {
video_size_t size;
unsigned int frame_rate; /* in frames per 1000sec */
@@ -157,25 +157,25 @@ struct video_status {
struct video_still_picture {
char __user *iFrame; /* pointer to a single iframe in memory */
int32_t size;
__s32 size;
};
typedef
struct video_highlight {
int active; /* 1=show highlight, 0=hide highlight */
uint8_t contrast1; /* 7- 4 Pattern pixel contrast */
__u8 contrast1; /* 7- 4 Pattern pixel contrast */
/* 3- 0 Background pixel contrast */
uint8_t contrast2; /* 7- 4 Emphasis pixel-2 contrast */
__u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
/* 3- 0 Emphasis pixel-1 contrast */
uint8_t color1; /* 7- 4 Pattern pixel color */
__u8 color1; /* 7- 4 Pattern pixel color */
/* 3- 0 Background pixel color */
uint8_t color2; /* 7- 4 Emphasis pixel-2 color */
__u8 color2; /* 7- 4 Emphasis pixel-2 color */
/* 3- 0 Emphasis pixel-1 color */
uint32_t ypos; /* 23-22 auto action mode */
__u32 ypos; /* 23-22 auto action mode */
/* 21-12 start y */
/* 9- 0 end y */
uint32_t xpos; /* 23-22 button color number */
__u32 xpos; /* 23-22 button color number */
/* 21-12 start x */
/* 9- 0 end x */
} video_highlight_t;
@@ -189,17 +189,17 @@ typedef struct video_spu {
typedef struct video_spu_palette { /* SPU Palette information */
int length;
uint8_t __user *palette;
__u8 __user *palette;
} video_spu_palette_t;
typedef struct video_navi_pack {
int length; /* 0 ... 1024 */
uint8_t data[1024];
__u8 data[1024];
} video_navi_pack_t;
typedef uint16_t video_attributes_t;
typedef __u16 video_attributes_t;
/* bits: descr. */
/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
/* 13-12 TV system (0=525/60, 1=625/50) */

Some files were not shown because too many files have changed in this diff.