Merge tag 'v4.5-rc5' into devel
Linux 4.5-rc5
@@ -133,6 +133,5 @@ extern int acpi_get_psd_map(struct cpudata **);
/* Methods to interact with the PCC mailbox controller. */
extern struct mbox_chan *
pcc_mbox_request_channel(struct mbox_client *, unsigned int);
-extern int mbox_send_message(struct mbox_chan *chan, void *mssg);

#endif /* _CPPC_ACPI_H*/

@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
 */
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
-u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)

@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
 */
static inline cputime_t timeval_to_cputime(const struct timeval *val)
{
-u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
+val->tv_usec * NSEC_PER_USEC;
return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)

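A note on the two casts above: on 32-bit architectures both tv_sec and NSEC_PER_SEC are 32-bit quantities, so the multiplication wraps before the result is widened to u64. A minimal user-space sketch of the failure mode (assumption: uint32_t stands in for a 32-bit time_t; this is not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t sec = 5;
            /* 32-bit multiply wraps modulo 2^32 before widening: */
            uint64_t wrong = sec * 1000000000u;           /* 705032704  */
            /* casting one operand first forces a 64-bit multiply: */
            uint64_t right = (uint64_t)sec * 1000000000u; /* 5000000000 */

            printf("%llu vs %llu\n", (unsigned long long)wrong,
                   (unsigned long long)right);
            return 0;
    }
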
@@ -239,6 +239,14 @@ extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
#endif

+#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+unsigned long address, pmd_t *pmdp)
+{
+
+}
+#endif
+
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{

@@ -42,6 +42,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async);

+bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
+struct drm_atomic_state *old_state,
+struct drm_crtc *crtc);
+
void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_atomic_state *old_state);

@@ -35,4 +35,13 @@

void drm_clflush_pages(struct page *pages[], unsigned long num_pages);

+static inline bool drm_arch_can_wc_memory(void)
+{
+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+return false;
+#else
+return true;
+#endif
+}
+
#endif

@@ -1166,6 +1166,7 @@ struct drm_connector {
struct drm_mode_object base;

char *name;
+int connector_id;
int connector_type;
int connector_type_id;
bool interlace_allowed;

@@ -2047,6 +2048,7 @@ struct drm_mode_config {
struct list_head fb_list;

int num_connector;
+struct ida connector_ida;
struct list_head connector_list;
int num_encoder;
struct list_head encoder_list;

@@ -2200,7 +2202,11 @@ int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);

extern void drm_connector_cleanup(struct drm_connector *connector);
-extern unsigned int drm_connector_index(struct drm_connector *connector);
+static inline unsigned drm_connector_index(struct drm_connector *connector)
+{
+return connector->connector_id;
+}

/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);

@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
/**
* struct drm_dp_mst_port - MST port
* @kref: reference count for this port.
-* @guid_valid: for DP 1.2 devices if we have validated the GUID.
-* @guid: guid for DP 1.2 device on this port.
* @port_num: port number
* @input: if this port is an input port.
* @mcs: message capability status - DP 1.2 spec.

@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
struct drm_dp_mst_port {
struct kref kref;

-/* if dpcd 1.2 device is on this port - its GUID info */
-bool guid_valid;
-u8 guid[16];
-
u8 port_num;
bool input;
bool mcs;

@@ -110,10 +104,12 @@ struct drm_dp_mst_port {
* @tx_slots: transmission slots for this device.
* @last_seqno: last sequence number used to talk to this.
* @link_address_sent: if a link address message has been sent to this device yet.
+* @guid: guid for DP 1.2 branch device. port under this branch can be
+* identified by port #.
*
* This structure represents an MST branch device, there is one
-* primary branch device at the root, along with any others connected
-* to downstream ports
+* primary branch device at the root, along with any other branches connected
+* to downstream port of parent branches.
*/
struct drm_dp_mst_branch {
struct kref kref;

@@ -132,6 +128,9 @@ struct drm_dp_mst_branch {
struct drm_dp_sideband_msg_tx *tx_slots[2];
int last_seqno;
bool link_address_sent;
+
+/* global unique identifier to identify branch devices */
+u8 guid[16];
};

@@ -406,11 +405,9 @@ struct drm_dp_payload {
* @conn_base_id: DRM connector ID this mgr is connected to.
* @down_rep_recv: msg receiver state for down replies.
* @up_req_recv: msg receiver state for up requests.
-* @lock: protects mst state, primary, guid, dpcd.
+* @lock: protects mst state, primary, dpcd.
* @mst_state: if this manager is enabled for an MST capable port.
* @mst_primary: pointer to the primary branch device.
-* @guid_valid: GUID valid for the primary branch device.
-* @guid: GUID for primary port.
* @dpcd: cache of DPCD for primary port.
* @pbn_div: PBN to slots divisor.
*

@@ -432,13 +429,11 @@ struct drm_dp_mst_topology_mgr {
struct drm_dp_sideband_msg_rx up_req_recv;

/* pointer to info about the initial MST device */
-struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+struct mutex lock; /* protects mst_state + primary + dpcd */

bool mst_state;
struct drm_dp_mst_branch *mst_primary;
-/* primary MST device GUID */
-bool guid_valid;
-u8 guid[16];
-
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 sink_count;
int pbn_div;

@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
+#define DRM_FIXED_EPSILON 1LL
+#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)

static inline s64 drm_int2fixp(int a)
{
return ((s64)a) << DRM_FIXED_POINT;
}

-static inline int drm_fixp2int(int64_t a)
+static inline int drm_fixp2int(s64 a)
{
return ((s64)a) >> DRM_FIXED_POINT;
}

-static inline unsigned drm_fixp_msbset(int64_t a)
+static inline int drm_fixp2int_ceil(s64 a)
+{
+if (a > 0)
+return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+else
+return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+}
+
+static inline unsigned drm_fixp_msbset(s64 a)
{
unsigned shift, sign = (a >> 63) & 1;

@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
return result;
}

+static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
+{
+s64 res;
+bool a_neg = a < 0;
+bool b_neg = b < 0;
+u64 a_abs = a_neg ? -a : a;
+u64 b_abs = b_neg ? -b : b;
+u64 rem;
+
+/* determine integer part */
+u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
+
+/* determine fractional part */
+{
+u32 i = DRM_FIXED_POINT;
+
+do {
+rem <<= 1;
+res_abs <<= 1;
+if (rem >= b_abs) {
+res_abs |= 1;
+rem -= b_abs;
+}
+} while (--i != 0);
+}
+
+/* round up LSB */
+{
+u64 summand = (rem << 1) >= b_abs;
+
+res_abs += summand;
+}
+
+res = (s64) res_abs;
+if (a_neg ^ b_neg)
+res = -res;
+return res;
+}
+
static inline s64 drm_fixp_exp(s64 x)
{
s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);

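The helpers above use a signed 64-bit 32.32 fixed-point format (DRM_FIXED_POINT is 32): a raw value x represents x / 2^32, DRM_FIXED_ONE is 1.0, and drm_fixp2int_ceil biases by ALMOST_ONE (one ulp less than 1.0) before truncating, so any nonzero fraction bumps the integer part while exact integers pass through unchanged. A small self-contained sketch of the same convention (user-space, positive values only):

    #include <assert.h>
    #include <stdint.h>

    #define FP_SHIFT 32
    #define FP_ONE (1ULL << FP_SHIFT)
    #define FP_ALMOST_ONE (FP_ONE - 1)

    static int64_t int2fixp(int a)      { return (int64_t)a << FP_SHIFT; }
    static int fixp2int(int64_t a)      { return a >> FP_SHIFT; }
    static int fixp2int_ceil(int64_t a) { return fixp2int(a + FP_ALMOST_ONE); }

    int main(void)
    {
            assert(fixp2int(int2fixp(7)) == 7);
            assert(fixp2int_ceil(FP_ONE / 2) == 1);  /* ceil(0.5) == 1 */
            assert(fixp2int_ceil(int2fixp(2)) == 2); /* exact ints unchanged */
            return 0;
    }

drm_fixp_from_fraction computes a/b in the same format: one 64-bit division for the integer part, then 32 rounds of binary long division on the remainder for the fraction bits, with a final half-ulp round-up of the least significant bit.
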
@@ -126,7 +126,7 @@
/* 104 */
/* 105 */
#define TEGRA210_CLK_D_AUDIO 106
-/* 107 ( affects abp -> ape) */
+#define TEGRA210_CLK_APB2APE 107
/* 108 */
/* 109 */
/* 110 */

@@ -682,9 +682,12 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
/*
* q->prep_rq_fn return values
*/
-#define BLKPREP_OK 0 /* serve it */
-#define BLKPREP_KILL 1 /* fatal error, kill */
-#define BLKPREP_DEFER 2 /* leave on queue */
+enum {
+BLKPREP_OK, /* serve it */
+BLKPREP_KILL, /* fatal error, kill, return -EIO */
+BLKPREP_DEFER, /* leave on queue */
+BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
+};

extern unsigned long blk_max_low_pfn, blk_max_pfn;

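A hedged sketch of the dispatch-side mapping the new comments imply (the real handling lives in blk_peek_request(); the function and the -EAGAIN stand-in for "leave on queue" are illustrative only). The point of BLKPREP_INVALID is to let a prep_rq_fn distinguish "the command itself is invalid" (-EREMOTEIO) from "the device failed it" (-EIO):

    static int example_blkprep_to_errno(int prep_ret)
    {
            switch (prep_ret) {
            case BLKPREP_OK:      return 0;          /* dispatch it        */
            case BLKPREP_DEFER:   return -EAGAIN;    /* leave on the queue */
            case BLKPREP_INVALID: return -EREMOTEIO; /* bad command        */
            case BLKPREP_KILL:
            default:              return -EIO;       /* fatal error        */
            }
    }
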
@@ -63,6 +63,18 @@
#define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
+// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
+#define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
#define CEPH_FEATURE_MON_METADATA (1ULL<<50)
#define CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT (1ULL<<51) /* can sort objs bitwise */
+#define CEPH_FEATURE_OSD_PROXY_WRITE_FEATURES (1ULL<<52)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 (1ULL<<53)
+#define CEPH_FEATURE_OSD_HITSET_GMT (1ULL<<54)
+#define CEPH_FEATURE_HAMMER_0_94_4 (1ULL<<55)
+#define CEPH_FEATURE_NEW_OSDOP_ENCODING (1ULL<<56) /* New, v7 encoding */
+#define CEPH_FEATURE_MON_STATEFUL_SUB (1ULL<<57) /* stateful mon subscription */
+#define CEPH_FEATURE_MON_ROUTE_OSDMAP (1ULL<<57) /* peon sends osdmaps */
+#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */
+// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
+#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */

/*
* The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature

@@ -108,7 +120,9 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
-CEPH_FEATURE_CRUSH_V4)
+CEPH_FEATURE_CRUSH_V4 | \
+CEPH_FEATURE_CRUSH_TUNABLES5 | \
+CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)

#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \

@@ -127,6 +127,12 @@ struct cgroup_subsys_state {
*/
u64 serial_nr;

+/*
+* Incremented by online self and children. Used to guarantee that
+* parents are not offlined before their children.
+*/
+atomic_t online_cnt;
+
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;

@@ -37,7 +37,7 @@ struct cleancache_ops {
void (*invalidate_fs)(int);
};

-extern int cleancache_register_ops(struct cleancache_ops *ops);
+extern int cleancache_register_ops(const struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
extern void __cleancache_init_shared_fs(struct super_block *);
extern int __cleancache_get_page(struct page *);

@@ -48,14 +48,14 @@ extern void __cleancache_invalidate_fs(struct super_block *);

#ifdef CONFIG_CLEANCACHE
#define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled(struct page *page)
-{
-return page->mapping->host->i_sb->cleancache_poolid >= 0;
-}
static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
{
return mapping->host->i_sb->cleancache_poolid >= 0;
}
+static inline bool cleancache_fs_enabled(struct page *page)
+{
+return cleancache_fs_enabled_mapping(page->mapping);
+}
#else
#define cleancache_enabled (0)
#define cleancache_fs_enabled(_page) (0)

@@ -89,11 +89,9 @@ static inline void cleancache_init_shared_fs(struct super_block *sb)

static inline int cleancache_get_page(struct page *page)
{
-int ret = -1;
-
if (cleancache_enabled && cleancache_fs_enabled(page))
-ret = __cleancache_get_page(page);
-return ret;
+return __cleancache_get_page(page);
+return -1;
}

static inline void cleancache_put_page(struct page *page)

@@ -144,7 +144,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
*/
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
-if (__builtin_constant_p((cond)) ? !!(cond) : \
+if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \

@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}

+extern void cpuset_post_attach_flush(void);
+
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}

+static inline void cpuset_post_attach_flush(void)
+{
+}
+
#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */

@@ -59,7 +59,8 @@ enum {
CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
-CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
+CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
+CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
};

/*

@@ -205,6 +206,11 @@ struct crush_map {
* mappings line up a bit better with previous mappings. */
__u8 chooseleaf_vary_r;

+/* if true, it makes chooseleaf firstn to return stable results (if
+* no local retry) so that data migrations would be optimal when some
+* device fails. */
+__u8 chooseleaf_stable;
+
#ifndef __KERNEL__
/*
* version 0 (original) of straw_calc has various flaws. version 1

@@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);

+#ifdef CONFIG_FS_DAX
+struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+#else
+static inline struct page *read_dax_sector(struct block_device *bdev,
+sector_t n)
+{
+return ERR_PTR(-ENXIO);
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
unsigned int flags, get_block_t, dax_iodone_t);

@@ -19,6 +19,8 @@

int devpts_new_index(struct inode *ptmx_inode);
void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
/* mknod in devpts */
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
void *priv);

@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
/* Dummy stubs in the no-pty case */
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
dev_t device, int index, void *priv)
{

@@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
struct list_head *head, bool remove);

-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+size_t len);

extern struct work_struct efivar_work;
void efivar_run_worker(void);

@@ -484,9 +484,6 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
-#ifdef CONFIG_FS_DAX
-int bd_map_count;
-#endif
};

/*

@@ -2907,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options);

static inline bool io_is_direct(struct file *filp)
{
-return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
}

static inline int iocb_flags(struct file *file)

@@ -220,10 +220,7 @@ struct fsnotify_mark {
/* List of marks by group->i_fsnotify_marks. Also reused for queueing
* mark into destroy_list when it's waiting for the end of SRCU period
* before it can be freed. [group->mark_mutex] */
-union {
-struct list_head g_list;
-struct rcu_head g_rcu;
-};
+struct list_head g_list;
/* Protects inode / mnt pointers, flags, masks */
spinlock_t lock;
/* List of marks for inode / vfsmount [obj_lock] */

@@ -165,7 +165,6 @@ struct ftrace_ops {
ftrace_func_t saved_func;
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
-int nr_trampolines;
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash;

@@ -604,6 +603,7 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
+extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);

@@ -613,8 +613,9 @@ static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
-static inline void ftrace_release_mod(struct module *mod) {}
-static inline void ftrace_module_init(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) { }
+static inline void ftrace_module_enable(struct module *mod) { }
+static inline void ftrace_release_mod(struct module *mod) { }
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;

@@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void)
}
#endif /* CONFIG_PM_SLEEP */

-#ifdef CONFIG_CMA
-
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype);
extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif

+#ifdef CONFIG_CMA
/* CMA stuff */
extern void init_cma_reserved_pageblock(struct page *page);

#endif

#endif /* __LINUX_GFP_H */

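With the guard relaxed above, contiguous-range allocation is no longer CMA-only: any configuration with memory isolation plus compaction gets it. A hedged usage sketch (the wrapper is illustrative; error handling is elided and, per the comment above, the PFN range must lie within a single zone):

    static int example_claim_range(unsigned long start_pfn, unsigned long nr)
    {
            int ret = alloc_contig_range(start_pfn, start_pfn + nr,
                                         MIGRATE_MOVABLE);
            if (ret)
                    return ret;
            /* ... use the physically contiguous pages ... */
            free_contig_range(start_pfn, nr);
            return 0;
    }
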
@@ -87,7 +87,8 @@ enum hrtimer_restart {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
-* @start_pid: timer statistics field to store the pid of the task which
+* @is_rel: Set if the timer was armed relative
+* @start_pid: timer statistics field to store the pid of the task which
* started the timer
* @start_site: timer statistics field to store the site where the timer
* was started

@@ -101,7 +102,8 @@ struct hrtimer {
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
-unsigned long state;
+u8 state;
+u8 is_rel;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;

@@ -321,6 +323,27 @@ static inline void clock_was_set_delayed(void) { }

#endif

+static inline ktime_t
+__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
+{
+ktime_t rem = ktime_sub(timer->node.expires, now);
+
+/*
+ * Adjust relative timers for the extra we added in
+ * hrtimer_start_range_ns() to prevent short timeouts.
+ */
+if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
+rem.tv64 -= hrtimer_resolution;
+return rem;
+}
+
+static inline ktime_t
+hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
+{
+return __hrtimer_expires_remaining_adjusted(timer,
+timer->base->get_time());
+}
+
extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);

@@ -390,7 +413,12 @@ static inline void hrtimer_restart(struct hrtimer *timer)
}

/* Query timers: */
-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+
+static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+{
+return __hrtimer_get_remaining(timer, false);
+}

extern u64 hrtimer_get_next_event(void);

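The split above separates the raw remaining time from the adjusted one: under CONFIG_TIME_LOW_RES a relative timer is padded by one resolution unit when armed (to prevent short timeouts), and the new is_rel flag records that so reporting paths can subtract the padding back out. A hedged usage sketch (the wrapper function is illustrative):

    static void example_report(const struct hrtimer *timer)
    {
            /* user-visible value: padding subtracted on TIME_LOW_RES */
            ktime_t shown = __hrtimer_get_remaining(timer, true);

            /* raw expiry delta, same behaviour as before the split */
            ktime_t raw = hrtimer_get_remaining(timer);

            (void)shown;
            (void)raw;
    }
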
@@ -235,6 +235,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))

+/* PRS_REG */
+#define DMA_PRS_PPR ((u32)1)
+
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \

@@ -133,8 +133,9 @@ struct iommu_dm_region {

/**
* struct iommu_ops - iommu ops and capabilities
-* @domain_init: init iommu domain
-* @domain_destroy: destroy iommu domain
+* @capable: check capability
+* @domain_alloc: allocate iommu domain
+* @domain_free: free iommu domain
* @attach_dev: attach device to an iommu domain
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain

@@ -144,8 +145,15 @@ struct iommu_dm_region {
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping
+* @device_group: find iommu group for a particular device
+* @domain_get_attr: Query domain attributes
+* @domain_set_attr: Change domain attributes
+* @get_dm_regions: Request list of direct mapping requirements for a device
+* @put_dm_regions: Free list of direct mapping requirements for a device
+* @domain_window_enable: Configure and enable a particular window for a domain
+* @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain
* @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
* @priv: per-instance data private to the iommu driver

@@ -182,9 +190,9 @@ struct iommu_ops {
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
-/* Set the numer of window per domain */
+/* Set the number of windows per domain */
int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
-/* Get the numer of window per domain */
+/* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);

#ifdef CONFIG_OF_IOMMU

@@ -70,6 +70,7 @@ struct irq_fwspec {
*/
enum irq_domain_bus_token {
DOMAIN_BUS_ANY = 0,
+DOMAIN_BUS_WIRED,
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,

@@ -526,6 +526,7 @@ enum ata_lpm_policy {
enum ata_lpm_hints {
ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */
ATA_LPM_HIPM = (1 << 1), /* may use HIPM */
+ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */
};

/* forward declarations */

@@ -135,6 +135,10 @@ enum {
+/* Memory types */
+NVM_ID_FMTYPE_SLC = 0,
+NVM_ID_FMTYPE_MLC = 1,
+
/* Device capabilities */
NVM_ID_DCAP_BBLKMGMT = 0x1,
NVM_UD_DCAP_ECC = 0x2,
};

struct nvm_id_lp_mlc {

@@ -66,7 +66,7 @@ struct lock_class {
/*
* class-hash:
*/
-struct list_head hash_entry;
+struct hlist_node hash_entry;

/*
* global list of all lock-classes:

@@ -199,7 +199,7 @@ struct lock_chain {
u8 irq_context;
u8 depth;
u16 base;
-struct list_head entry;
+struct hlist_node entry;
u64 chain_key;
};

@@ -51,7 +51,7 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
/* default hierarchy stats */
-MEMCG_SOCK,
+MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
MEMCG_NR_STAT,
};

@@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp);
#endif

#ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK VM_GROWSUP
#else
-#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK VM_GROWSDOWN
#endif

+#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+
/*
* Special vmas that are non-mergable, non-mlock()able.
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.

@@ -1341,8 +1343,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}

-extern struct task_struct *task_of_stack(struct task_struct *task,
-struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);

extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,

@@ -424,9 +424,9 @@ struct mm_struct {
unsigned long total_vm; /* Total pages mapped */
unsigned long locked_vm; /* Pages that have PG_mlocked set */
unsigned long pinned_vm; /* Refcount permanently increased */
-unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
-unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
-unsigned long stack_vm; /* VM_GROWSUP/DOWN */
+unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;

@@ -682,6 +682,12 @@ typedef struct pglist_data {
*/
unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+spinlock_t split_queue_lock;
+struct list_head split_queue;
+unsigned long split_queue_len;
+#endif
} pg_data_t;

#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)

@@ -324,6 +324,12 @@ struct module_layout {
#define __module_layout_align
#endif

+struct mod_kallsyms {
+Elf_Sym *symtab;
+unsigned int num_symtab;
+char *strtab;
+};
+
struct module {
enum module_state state;

@@ -405,15 +411,10 @@ struct module {
#endif

#ifdef CONFIG_KALLSYMS
-/*
-* We keep the symbol and string tables for kallsyms.
-* The core_* fields below are temporary, loader-only (they
-* could really be discarded after module init).
-*/
-Elf_Sym *symtab, *core_symtab;
-unsigned int num_symtab, core_num_syms;
-char *strtab, *core_strtab;
-
+/* Protected by RCU and/or module_mutex: use rcu_dereference() */
+struct mod_kallsyms *kallsyms;
+struct mod_kallsyms core_kallsyms;

/* Section attributes */
struct module_sect_attrs *sect_attrs;

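The mod_kallsyms indirection above lets the loader swap the temporary symbol table for the final one with a single pointer update instead of juggling paired symtab/core_symtab fields. A hedged sketch of the reader side the new comment prescribes (the in-tree accessors may use a different RCU flavour; this function is illustrative only):

    static unsigned int example_num_syms(struct module *mod)
    {
            unsigned int n;

            rcu_read_lock();
            /* a concurrent module load may swap in core_kallsyms */
            n = rcu_dereference(mod->kallsyms)->num_symtab;
            rcu_read_unlock();
            return n;
    }
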
@@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n)
clear_bit(NAPI_STATE_NPSVC, &n->state);
}

-#ifdef CONFIG_SMP
/**
* napi_synchronize - wait until NAPI is not running
* @n: napi context

@@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n)
*/
static inline void napi_synchronize(const struct napi_struct *n)
{
-while (test_bit(NAPI_STATE_SCHED, &n->state))
-msleep(1);
+if (IS_ENABLED(CONFIG_SMP))
+while (test_bit(NAPI_STATE_SCHED, &n->state))
+msleep(1);
+else
+barrier();
}
-#else
-# define napi_synchronize(n) barrier()
-#endif

enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,

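The napi_synchronize() rewrite above trades an #ifdef/#else pair for IS_ENABLED(CONFIG_SMP), so both branches are parsed and type-checked on every build while the dead one still folds away as a compile-time constant. The same pattern in isolation (a hedged sketch, not the kernel's code):

    static inline void example_sync(const unsigned long *state)
    {
            if (IS_ENABLED(CONFIG_SMP))     /* constant-folded to 0 or 1 */
                    while (test_bit(0, state))
                            msleep(1);
            else
                    barrier();              /* UP: nothing to wait for */
    }
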
@@ -929,7 +929,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}

-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section(__##table##_of_table) \

@@ -634,9 +634,6 @@ struct perf_event_context {
int nr_cgroups; /* cgroup evts */
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
-
-struct delayed_work orphans_remove;
-bool orphans_remove_sched;
};

/*

@@ -729,7 +726,7 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
-extern struct perf_event *perf_event_get(unsigned int fd);
+extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);

@@ -1044,7 +1041,7 @@ extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
-extern int __perf_event_disable(void *info);
+extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *

@@ -1070,7 +1067,7 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
-static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
return ERR_PTR(-EINVAL);

@@ -10,7 +10,7 @@
* backing is indicated by flags in the high bits of the value.
*/
typedef struct {
-unsigned long val;
+u64 val;
} pfn_t;
#endif

@@ -9,14 +9,13 @@
* PFN_DEV - pfn is not covered by system memmap by default
* PFN_MAP - pfn has a dynamic page mapping established by a device driver
*/
-#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \
-<< (BITS_PER_LONG - PAGE_SHIFT))
-#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1))
-#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2))
-#define PFN_DEV (1UL << (BITS_PER_LONG - 3))
-#define PFN_MAP (1UL << (BITS_PER_LONG - 4))
+#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
+#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
+#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
+#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
+#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))

-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags)
+static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
{
pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };

@@ -29,7 +28,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
return __pfn_to_pfn_t(pfn, 0);
}

-extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);

static inline bool pfn_t_has_page(pfn_t pfn)
{

@@ -48,7 +47,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn)
return NULL;
}

-static inline dma_addr_t pfn_t_to_phys(pfn_t pfn)
+static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
{
return PFN_PHYS(pfn_t_to_pfn(pfn));
}

@@ -87,7 +86,7 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
-const unsigned long flags = PFN_DEV|PFN_MAP;
+const u64 flags = PFN_DEV|PFN_MAP;

return (pfn.val & flags) == flags;
}

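Moving pfn_t.val and the flag masks from unsigned long to u64 keeps the four flag bits clear of real PFN bits on 32-bit kernels with 64-bit physical addressing (PAE/LPAE), where a PFN can need more than BITS_PER_LONG - 4 bits. A user-space sketch of the collision the change avoids (assumption: a 44-bit physical address space as on ARM LPAE):

    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            /* 44-bit physical address -> PFN needs a full 32 bits */
            uint64_t pfn = ((1ULL << 44) - 1) >> PAGE_SHIFT;

            uint32_t flags32 = ~0u   << (32 - 4); /* top of a 32-bit long */
            uint64_t flags64 = ~0ULL << (64 - 4); /* top of a u64         */

            /* old scheme: flag bits alias PFN bits; new scheme: never */
            return ((uint32_t)pfn & flags32) && !(pfn & flags64) ? 0 : 1;
    }
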
@@ -378,13 +378,29 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags);

+/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter: iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true. If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+iter->next_index = iter->index;
+return NULL;
+}
+
/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
* Returns: current chunk size
*/
-static __always_inline unsigned
+static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;

@@ -418,9 +434,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
return slot + offset + 1;
}
} else {
-unsigned size = radix_tree_chunk_size(iter) - 1;
+long size = radix_tree_chunk_size(iter);

-while (size--) {
+while (--size > 0) {
slot++;
iter->index++;
if (likely(*slot))

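A hedged usage sketch of radix_tree_iter_retry() in the RCU-only lookup pattern its kernel-doc describes (it mirrors the find_get_entries()-style loops; the wrapper and process_page() are hypothetical):

    static void example_scan(struct radix_tree_root *root)
    {
            void **slot;
            struct radix_tree_iter iter;

            rcu_read_lock();
            radix_tree_for_each_slot(slot, root, &iter, 0) {
                    struct page *page = radix_tree_deref_slot(slot);

                    if (unlikely(radix_tree_deref_retry(page))) {
                            /* raced with create/delete: redo this chunk */
                            slot = radix_tree_iter_retry(&iter);
                            continue;
                    }
                    process_page(page);     /* hypothetical consumer */
            }
            rcu_read_unlock();
    }
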
@@ -152,6 +152,8 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,

# define jiffies raid6_jiffies()
# define printk printf
+# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
+# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
# define GFP_KERNEL 0
# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
PROT_READ|PROT_WRITE, \

@@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
__put_anon_vma(anon_vma);
}

-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-struct anon_vma *anon_vma = vma->anon_vma;
-if (anon_vma)
-down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-struct anon_vma *anon_vma = vma->anon_vma;
-if (anon_vma)
-up_write(&anon_vma->root->rwsem);
-}
-
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
down_write(&anon_vma->root->rwsem);

@@ -299,6 +299,7 @@ struct sk_buff;
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
+extern int sysctl_max_skb_frags;

typedef struct skb_frag_struct skb_frag_t;

@@ -1,10 +1,13 @@
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

+#include <linux/dma-direction.h>
+#include <linux/init.h>
#include <linux/types.h>

struct device;
struct dma_attrs;
+struct page;
struct scatterlist;

extern int swiotlb_force;

@@ -14,8 +14,10 @@
* See the file COPYING for more details.
*/

+#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint-defs.h>

@@ -132,6 +134,9 @@ extern void syscall_unregfunc(void);
void *it_func; \
void *__data; \
\
+if (!cpu_online(raw_smp_processor_id())) \
+return; \
+\
if (!(cond)) \
return; \
prercu; \

@@ -649,6 +649,7 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* tty_mutex.c */
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(struct tty_struct *tty);
+extern int tty_lock_interruptible(struct tty_struct *tty);
extern void __lockfunc tty_unlock(struct tty_struct *tty);
extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);

@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);

+unsigned long ucs2_utf8size(const ucs2_char_t *src);
+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
+unsigned long maxlength);
+
#endif /* _LINUX_UCS2_STRING_H_ */

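A hedged usage sketch of the new conversion pair (assumptions: sizes are in bytes, the size helper does not count a terminator, and the wrapper is illustrative rather than an in-tree function):

    static u8 *example_ucs2_to_utf8(const ucs2_char_t *src)
    {
            unsigned long size = ucs2_utf8size(src);    /* bytes, no NUL */
            u8 *buf = kmalloc(size + 1, GFP_KERNEL);

            if (buf) {
                    ucs2_as_utf8(buf, src, size);
                    buf[size] = '\0';
            }
            return buf;
    }
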
@@ -311,6 +311,7 @@ enum {

__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
+__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */

WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */

@@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

#define create_workqueue(name) \
-alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
+alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name) \
-alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
-1, (name))
+alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
+WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name) \
-alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)

extern void destroy_workqueue(struct workqueue_struct *wq);

@@ -533,7 +533,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
const unsigned int requested_sizes[]);
int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
-int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking);
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+bool nonblocking);

int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);

@@ -6,8 +6,8 @@
#include <linux/mutex.h>
#include <net/sock.h>

-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
void unix_gc(void);
void wait_for_unix_gc(void);
struct sock *unix_get_socket(struct file *filp);

@@ -252,6 +252,12 @@ struct l2cap_conn_rsp {
#define L2CAP_PSM_3DSP 0x0021
#define L2CAP_PSM_IPSP 0x0023 /* 6LoWPAN */

+#define L2CAP_PSM_DYN_START 0x1001
+#define L2CAP_PSM_DYN_END 0xffff
+#define L2CAP_PSM_AUTO_END 0x10ff
+#define L2CAP_PSM_LE_DYN_START 0x0080
+#define L2CAP_PSM_LE_DYN_END 0x00ff
+
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002

@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
return dst && !(dst->flags & DST_METADATA);
}

+static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
+const struct sk_buff *skb_b)
+{
+const struct metadata_dst *a, *b;
+
+if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
+return 0;
+
+a = (const struct metadata_dst *) skb_dst(skb_a);
+b = (const struct metadata_dst *) skb_dst(skb_b);
+
+if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
+return 1;
+
+return memcmp(&a->u.tun_info, &b->u.tun_info,
+sizeof(a->u.tun_info) + a->u.tun_info.options_len);
+}
+
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);

@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)

void ip6_route_input(struct sk_buff *skb);

-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
-struct flowi6 *fl6);
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+struct flowi6 *fl6, int flags);
+
+static inline struct dst_entry *ip6_route_output(struct net *net,
+const struct sock *sk,
+struct flowi6 *fl6)
+{
+return ip6_route_output_flags(net, sk, fl6, 0);
+}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
int flags);

@@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
u8 *protocol, struct flowi4 *fl4);
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);

struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,

@@ -79,12 +79,10 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *proto);

-#ifdef CONFIG_LOCKDEP
-# define CONNTRACK_LOCKS 8
-#else
-# define CONNTRACK_LOCKS 1024
-#endif
+#define CONNTRACK_LOCKS 1024

extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+void nf_conntrack_lock(spinlock_t *lock);

extern spinlock_t nf_conntrack_expect_lock;

@@ -21,6 +21,7 @@ struct scm_creds {
struct scm_fp_list {
short count;
short max;
+struct user_struct *user;
struct file *fp[SCM_MAX_FD];
};

@@ -756,7 +756,6 @@ struct sctp_transport {

/* Reference counting. */
atomic_t refcnt;
-__u32 dead:1,
/* RTO-Pending : A flag used to track if one of the DATA
* chunks sent to this address is currently being
* used to compute a RTT. If this flag is 0,

@@ -766,7 +765,7 @@ struct sctp_transport {
* calculation completes (i.e. the DATA chunk
* is SACK'd) clear this flag.
*/
-rto_pending:1,
+__u32 rto_pending:1,

/*
* hb_sent : a flag that signals that we have a pending

@@ -955,7 +954,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
void sctp_transport_free(struct sctp_transport *);
void sctp_transport_reset_timers(struct sctp_transport *);
-void sctp_transport_hold(struct sctp_transport *);
+int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);

@@ -1035,18 +1035,6 @@ struct proto {
struct list_head node;
#ifdef SOCK_REFCNT_DEBUG
atomic_t socks;
#endif
-#ifdef CONFIG_MEMCG_KMEM
-/*
-* cgroup specific init/deinit functions. Called once for all
-* protocols that implement it, from cgroups populate function.
-* This function has to setup any files the protocol want to
-* appear in the kmem cgroup filesystem.
-*/
-int (*init_cgroup)(struct mem_cgroup *memcg,
-struct cgroup_subsys *ss);
-void (*destroy_cgroup)(struct mem_cgroup *memcg);
-struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg);
-#endif
int (*diag_destroy)(struct sock *sk, int err);
};

@@ -16,7 +16,7 @@ struct sock_reuseport {
};

extern int reuseport_alloc(struct sock *sk);
-extern int reuseport_add_sock(struct sock *sk, const struct sock *sk2);
+extern int reuseport_add_sock(struct sock *sk, struct sock *sk2);
extern void reuseport_detach_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash,

@@ -216,7 +216,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */

-/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND 10

/* Bit Flags for sysctl_tcp_fastopen */

@@ -447,7 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
struct request_sock *req,

@@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
+int count);

/* main midi functions */

@@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));

bool target_sense_desc_format(struct se_device *dev);
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+struct request_queue *q, int block_size);

#endif /* TARGET_CORE_BACKEND_H */

@@ -140,6 +140,8 @@ enum se_cmd_flags_table {
SCF_COMPARE_AND_WRITE = 0x00080000,
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
+SCF_ACK_KREF = 0x00400000,
+SCF_USE_CPUID = 0x00800000,
};

/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */

@@ -187,6 +189,7 @@ enum target_sc_flags_table {
TARGET_SCF_BIDI_OP = 0x01,
TARGET_SCF_ACK_KREF = 0x02,
TARGET_SCF_UNKNOWN_SIZE = 0x04,
+TARGET_SCF_USE_CPUID = 0x08,
};

/* fabric independent task management function values */

@@ -490,8 +493,9 @@ struct se_cmd {
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
#define CMD_T_DEV_ACTIVE (1 << 7)
#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
#define CMD_T_TAS (1 << 10)
+#define CMD_T_FABRIC_STOP (1 << 11)
spinlock_t t_state_lock;
struct kref cmd_kref;
struct completion t_transport_stop_comp;

@@ -511,9 +515,6 @@ struct se_cmd {

struct list_head state_list;

-/* old task stop completion, consider merging with some of the above */
-struct completion task_stop_comp;
-
/* backend private data */
void *priv;

@@ -17,7 +17,7 @@ TRACE_EVENT(fence_annotate_wait_on,

TP_STRUCT__entry(
__string(driver, fence->ops->get_driver_name(fence))
-__string(timeline, fence->ops->get_driver_name(fence))
+__string(timeline, fence->ops->get_timeline_name(fence))
__field(unsigned int, context)
__field(unsigned int, seqno)

@@ -48,6 +48,8 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
+#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
+#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09

#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11

@@ -59,6 +61,7 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
+#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a

#define ETNA_MAX_PIPES 4

@@ -222,7 +222,6 @@ struct fsxattr {
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
-#define BLKDAXSET _IO(0x12,128)
-#define BLKDAXGET _IO(0x12,129)

#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */