Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Conflicting commits, all resolutions pretty trivial:

drivers/bus/mhi/pci_generic.c
  5c2c853159 ("bus: mhi: pci-generic: configurable network interface MRU")
  56f6f4c4eb ("bus: mhi: pci_generic: Apply no-op for wake using sideband wake boolean")

drivers/nfc/s3fwrn5/firmware.c
  a0302ff590 ("nfc: s3fwrn5: remove unnecessary label")
  46573e3ab0 ("nfc: s3fwrn5: fix undefined parameter values in dev_err()")
  801e541c79 ("nfc: s3fwrn5: fix undefined parameter values in dev_err()")

MAINTAINERS
  7d901a1e87 ("net: phy: add Maxlinear GPY115/21x/24x driver")
  8a7b46fa79 ("MAINTAINERS: add Yasushi SHOJI as reviewer for the Microchip CAN BUS Analyzer Tool driver")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -32,6 +32,8 @@
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>

/* Registers */
@@ -1377,6 +1379,7 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
/* Non-UAPI available opcodes. */
[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
[BPF_ST | BPF_NOSPEC] = &&ST_NOSPEC,
[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
@@ -1621,7 +1624,21 @@ out:
COND_JMP(s, JSGE, >=)
COND_JMP(s, JSLE, <=)
#undef COND_JMP
/* STX and ST and LDX*/
/* ST, STX and LDX*/
ST_NOSPEC:
/* Speculation barrier for mitigating Speculative Store Bypass.
* In case of arm64, we rely on the firmware mitigation as
* controlled via the ssbd kernel parameter. Whenever the
* mitigation is enabled, it works for all of the kernel code
* with no need to provide any additional instructions here.
* In case of x86, we use 'lfence' insn for mitigation. We
* reuse preexisting logic from Spectre v1 mitigation that
* happens to produce the required code on x86 for v4 as well.
*/
#ifdef CONFIG_X86
barrier_nospec();
#endif
CONT;
#define LDST(SIZEOP, SIZE) \
STX_MEM_##SIZEOP: \
*(SIZE *)(unsigned long) (DST + insn->off) = SRC; \

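Note on the core.c hunks above: they register the new, non-UAPI BPF_ST | BPF_NOSPEC opcode in the interpreter's goto table and implement it as a speculation barrier, lfence on x86 via barrier_nospec() and effectively nothing extra on arm64, where the firmware/ssbd mitigation already covers kernel code. A minimal sketch of that kind of dispatch is below; bpf_spec_barrier() is a hypothetical stand-in, not the kernel helper:

/* Sketch only: arch-conditional speculation barrier in the spirit of the
 * ST_NOSPEC handler above. bpf_spec_barrier() is hypothetical.
 */
static inline void bpf_spec_barrier(void)
{
#if defined(__x86_64__) || defined(__i386__)
	__asm__ __volatile__("lfence" ::: "memory");	/* block speculative store bypass */
#else
	__asm__ __volatile__("" ::: "memory");		/* arch/firmware mitigation assumed */
#endif
}
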
@@ -206,15 +206,17 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs,
verbose(cbs->private_data, "BUG_%02x\n", insn->code);
}
} else if (class == BPF_ST) {
if (BPF_MODE(insn->code) != BPF_MEM) {
if (BPF_MODE(insn->code) == BPF_MEM) {
verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->imm);
} else if (BPF_MODE(insn->code) == 0xc0 /* BPF_NOSPEC, no UAPI */) {
verbose(cbs->private_data, "(%02x) nospec\n", insn->code);
} else {
verbose(cbs->private_data, "BUG_st_%02x\n", insn->code);
return;
}
verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n",
insn->code,
bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
insn->dst_reg,
insn->off, insn->imm);
} else if (class == BPF_LDX) {
if (BPF_MODE(insn->code) != BPF_MEM) {
verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code);

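For readers of the disasm.c hunk above: the literal 0xc0 is compared because BPF_NOSPEC has no UAPI define, and BPF_MODE() simply masks the top three bits of the opcode byte. A small stand-alone sketch of that decoding, with renamed macros so they do not shadow the UAPI ones:

#include <stdio.h>

#define EX_BPF_CLASS(code) ((code) & 0x07)	/* same masks as linux/bpf_common.h */
#define EX_BPF_MODE(code)  ((code) & 0xe0)

int main(void)
{
	unsigned char code = 0x02 | 0xc0;	/* BPF_ST class with the 0xc0 "nospec" mode */

	printf("class=0x%02x mode=0x%02x\n", EX_BPF_CLASS(code), EX_BPF_MODE(code));
	return 0;
}
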
@@ -2667,6 +2667,19 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
cur = env->cur_state->frame[env->cur_state->curframe];
if (value_regno >= 0)
reg = &cur->regs[value_regno];
if (!env->bypass_spec_v4) {
bool sanitize = reg && is_spillable_regtype(reg->type);

for (i = 0; i < size; i++) {
if (state->stack[spi].slot_type[i] == STACK_INVALID) {
sanitize = true;
break;
}
}

if (sanitize)
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
}

if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
@@ -2689,47 +2702,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
verbose(env, "invalid size of register spill\n");
return -EACCES;
}

if (state != cur && reg->type == PTR_TO_STACK) {
verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
return -EINVAL;
}

if (!env->bypass_spec_v4) {
bool sanitize = false;

if (state->stack[spi].slot_type[0] == STACK_SPILL &&
register_is_const(&state->stack[spi].spilled_ptr))
sanitize = true;
for (i = 0; i < BPF_REG_SIZE; i++)
if (state->stack[spi].slot_type[i] == STACK_MISC) {
sanitize = true;
break;
}
if (sanitize) {
int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
int soff = (-spi - 1) * BPF_REG_SIZE;

/* detected reuse of integer stack slot with a pointer
* which means either llvm is reusing stack slot or
* an attacker is trying to exploit CVE-2018-3639
* (speculative store bypass)
* Have to sanitize that slot with preemptive
* store of zero.
*/
if (*poff && *poff != soff) {
/* disallow programs where single insn stores
* into two different stack slots, since verifier
* cannot sanitize them
*/
verbose(env,
"insn %d cannot access two stack slots fp%d and fp%d",
insn_idx, *poff, soff);
return -EINVAL;
}
*poff = soff;
}
}
save_register_state(state, spi, reg);
} else {
u8 type = STACK_MISC;
@@ -6804,6 +6780,12 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
alu_state |= ptr_is_dst_reg ?
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;

/* Limit pruning on unknown scalars to enable deep search for
* potential masking differences from other program paths.
*/
if (!off_is_imm)
env->explore_alu_limits = true;
}

err = update_alu_sanitation_state(aux, alu_state, alu_limit);
@@ -10207,8 +10189,8 @@ next:
}

/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct bpf_id_pair *idmap)
static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
{
bool equal;

@@ -10234,6 +10216,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
switch (rold->type) {
case SCALAR_VALUE:
if (env->explore_alu_limits)
return false;
if (rcur->type == SCALAR_VALUE) {
if (!rold->precise && !rcur->precise)
return true;
@@ -10324,9 +10308,8 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
return false;
}

static bool stacksafe(struct bpf_func_state *old,
struct bpf_func_state *cur,
struct bpf_id_pair *idmap)
static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
struct bpf_func_state *cur, struct bpf_id_pair *idmap)
{
int i, spi;

@@ -10371,9 +10354,8 @@ static bool stacksafe(struct bpf_func_state *old,
continue;
if (old->stack[spi].slot_type[0] != STACK_SPILL)
continue;
if (!regsafe(&old->stack[spi].spilled_ptr,
&cur->stack[spi].spilled_ptr,
idmap))
if (!regsafe(env, &old->stack[spi].spilled_ptr,
&cur->stack[spi].spilled_ptr, idmap))
/* when explored and current stack slot are both storing
* spilled registers, check that stored pointers types
* are the same as well.
@@ -10430,10 +10412,11 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat

memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
for (i = 0; i < MAX_BPF_REG; i++)
if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
if (!regsafe(env, &old->regs[i], &cur->regs[i],
env->idmap_scratch))
return false;

if (!stacksafe(old, cur, env->idmap_scratch))
if (!stacksafe(env, old, cur, env->idmap_scratch))
return false;

if (!refsafe(old, cur))
@@ -12198,35 +12181,33 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)

for (i = 0; i < insn_cnt; i++, insn++) {
bpf_convert_ctx_access_t convert_ctx_access;
bool ctx_access;

if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
type = BPF_READ;
else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
insn->code == (BPF_STX | BPF_MEM | BPF_DW))
ctx_access = true;
} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
type = BPF_WRITE;
else
ctx_access = BPF_CLASS(insn->code) == BPF_STX;
} else {
continue;
}

if (type == BPF_WRITE &&
env->insn_aux_data[i + delta].sanitize_stack_off) {
env->insn_aux_data[i + delta].sanitize_stack_spill) {
struct bpf_insn patch[] = {
/* Sanitize suspicious stack slot with zero.
* There are no memory dependencies for this store,
* since it's only using frame pointer and immediate
* constant of zero
*/
BPF_ST_MEM(BPF_DW, BPF_REG_FP,
env->insn_aux_data[i + delta].sanitize_stack_off,
0),
/* the original STX instruction will immediately
* overwrite the same stack slot with appropriate value
*/
*insn,
BPF_ST_NOSPEC(),
};

cnt = ARRAY_SIZE(patch);
@@ -12240,6 +12221,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
continue;
}

if (!ctx_access)
continue;

switch (env->insn_aux_data[i + delta].ptr_type) {
case PTR_TO_CTX:
if (!ops->convert_ctx_access)
@@ -13093,37 +13077,6 @@ static void free_states(struct bpf_verifier_env *env)
}
}

/* The verifier is using insn_aux_data[] to store temporary data during
* verification and to store information for passes that run after the
* verification like dead code sanitization. do_check_common() for subprogram N
* may analyze many other subprograms. sanitize_insn_aux_data() clears all
* temporary data after do_check_common() finds that subprogram N cannot be
* verified independently. pass_cnt counts the number of times
* do_check_common() was run and insn->aux->seen tells the pass number
* insn_aux_data was touched. These variables are compared to clear temporary
* data from failed pass. For testing and experiments do_check_common() can be
* run multiple times even when prior attempt to verify is unsuccessful.
*
* Note that special handling is needed on !env->bypass_spec_v1 if this is
* ever called outside of error path with subsequent program rejection.
*/
static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
struct bpf_insn_aux_data *aux;
int i, class;

for (i = 0; i < env->prog->len; i++) {
class = BPF_CLASS(insn[i].code);
if (class != BPF_LDX && class != BPF_STX)
continue;
aux = &env->insn_aux_data[i];
if (aux->seen != env->pass_cnt)
continue;
memset(aux, 0, offsetof(typeof(*aux), orig_idx));
}
}

static int do_check_common(struct bpf_verifier_env *env, int subprog)
{
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
@@ -13200,9 +13153,6 @@ out:
if (!ret && pop_log)
bpf_vlog_reset(&env->log, 0);
free_states(env);
if (ret)
/* clean aux data in case subprog was rejected */
sanitize_insn_aux_data(env);
return ret;
}

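Taken together, the verifier.c hunks above drop the old per-offset sanitize_stack_off bookkeeping: a stack spill is now simply flagged (sanitize_stack_spill) when a spillable pointer is written or any byte of the target slot is still STACK_INVALID, and convert_ctx_accesses() later emits the original store followed by BPF_ST_NOSPEC(). A reduced sketch of that flagging decision, using simplified stand-in types rather than the verifier's:

#include <stdbool.h>

#define EX_SLOT_SIZE 8
enum ex_slot_state { EX_STACK_INVALID, EX_STACK_MISC, EX_STACK_SPILL };

/* Mirrors the check added in check_stack_write_fixed_off() above. */
static bool spill_needs_sanitize(const enum ex_slot_state slot[EX_SLOT_SIZE],
				 bool value_is_spillable_ptr, int store_size)
{
	if (value_is_spillable_ptr)
		return true;			/* pointer spill: always add the barrier */
	for (int i = 0; i < store_size; i++)
		if (slot[i] == EX_STACK_INVALID)
			return true;		/* partially uninitialized slot */
	return false;
}
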
@@ -1221,9 +1221,7 @@ int cgroup1_get_tree(struct fs_context *fc)
ret = cgroup_do_get_tree(fc);

if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
struct super_block *sb = fc->root->d_sb;
dput(fc->root);
deactivate_locked_super(sb);
fc_drop_locked(fc);
ret = 1;
}

@@ -5,6 +5,13 @@
*/
#include <linux/dma-map-ops.h>

static struct page *dma_common_vaddr_to_page(void *cpu_addr)
{
if (is_vmalloc_addr(cpu_addr))
return vmalloc_to_page(cpu_addr);
return virt_to_page(cpu_addr);
}

/*
* Create scatter-list for the already allocated DMA buffer.
*/
@@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
struct page *page = virt_to_page(cpu_addr);
struct page *page = dma_common_vaddr_to_page(cpu_addr);
int ret;

ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long user_count = vma_pages(vma);
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
struct page *page = dma_common_vaddr_to_page(cpu_addr);
int ret = -ENXIO;

vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return -ENXIO;

return remap_pfn_range(vma, vma->vm_start,
page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
page_to_pfn(page) + vma->vm_pgoff,
user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
return -ENXIO;

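The new dma_common_vaddr_to_page() helper above exists because virt_to_page() is only valid for addresses in the kernel's linear mapping; buffers that came from a vmalloc/remapped allocation path have to be translated with vmalloc_to_page() instead. A stand-alone sketch of that dispatch; the wrapper name is made up here, while the three helpers are the real kernel APIs:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *example_vaddr_to_page(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))		/* remapped buffer, not in the linear map */
		return vmalloc_to_page(cpu_addr);
	return virt_to_page(cpu_addr);		/* linear-mapped buffer */
}
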
@@ -47,7 +47,7 @@ void __init idle_thread_set_boot_cpu(void)
*
* Creates the thread if it does not exist.
*/
static inline void idle_init(unsigned int cpu)
static __always_inline void idle_init(unsigned int cpu)
{
struct task_struct *tsk = per_cpu(idle_threads, cpu);

@@ -991,6 +991,11 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
if (!p)
goto out;

/* Protect timer list r/w in arm_timer() */
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL))
goto out;

/*
* Fetch the current sample and update the timer's expiry time.
*/
@@ -1001,11 +1006,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)

bump_cpu_timer(timer, now);

/* Protect timer list r/w in arm_timer() */
sighand = lock_task_sighand(p, &flags);
if (unlikely(sighand == NULL))
goto out;

/*
* Now re-arm for the new expiry time.
*/

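The posix-cpu-timers hunks above move the sighand locking ahead of the sample/bump step, so the timer-list read/write inside arm_timer() is covered by the lock for the whole re-arm sequence rather than only its tail. A condensed sketch of the resulting shape; example_sample_and_bump() and example_arm_timer_locked() are placeholders for the file's static helpers, not real functions:

#include <linux/posix-timers.h>
#include <linux/sched/signal.h>

static void example_sample_and_bump(struct k_itimer *timer) { /* placeholder */ }
static void example_arm_timer_locked(struct k_itimer *timer, struct task_struct *p) { /* placeholder */ }

static void example_timer_rearm(struct k_itimer *timer, struct task_struct *p)
{
	struct sighand_struct *sighand;
	unsigned long flags;

	sighand = lock_task_sighand(p, &flags);	/* protects the timer list in arm_timer() */
	if (unlikely(sighand == NULL))
		return;

	example_sample_and_bump(timer);		/* refresh expiry, bump_cpu_timer() */
	example_arm_timer_locked(timer, p);	/* queue on the list, now under the lock */

	unlock_task_sighand(p, &flags);
}
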
@@ -207,6 +207,7 @@ struct timer_base {
unsigned int cpu;
bool next_expiry_recalc;
bool is_idle;
bool timers_pending;
DECLARE_BITMAP(pending_map, WHEEL_SIZE);
struct hlist_head vectors[WHEEL_SIZE];
} ____cacheline_aligned;
@@ -595,6 +596,7 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
* can reevaluate the wheel:
*/
base->next_expiry = bucket_expiry;
base->timers_pending = true;
base->next_expiry_recalc = false;
trigger_dyntick_cpu(base, timer);
}
@@ -1582,6 +1584,7 @@ static unsigned long __next_timer_interrupt(struct timer_base *base)
}

base->next_expiry_recalc = false;
base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);

return next;
}
@@ -1633,7 +1636,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
u64 expires = KTIME_MAX;
unsigned long nextevt;
bool is_max_delta;

/*
* Pretend that there is no timer pending if the cpu is offline.
@@ -1646,7 +1648,6 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
if (base->next_expiry_recalc)
base->next_expiry = __next_timer_interrupt(base);
nextevt = base->next_expiry;
is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);

/*
* We have a fresh next event. Check whether we can forward the
@@ -1664,7 +1665,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
expires = basem;
base->is_idle = false;
} else {
if (!is_max_delta)
if (base->timers_pending)
expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
/*
* If we expect to sleep more than a tick, mark the base idle.
@@ -1947,6 +1948,7 @@ int timers_prepare_cpu(unsigned int cpu)
base = per_cpu_ptr(&timer_bases[b], cpu);
base->clk = jiffies;
base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
base->timers_pending = false;
base->is_idle = false;
}
return 0;

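The timer.c hunks above replace the locally computed is_max_delta with a timers_pending flag that is maintained where the wheel itself changes (enqueue_timer(), __next_timer_interrupt(), timers_prepare_cpu()), so get_next_timer_interrupt() can report "no event" directly. A stand-alone sketch of that final computation, with example constants:

#include <stdint.h>
#include <stdio.h>

#define EX_TICK_NSEC	1000000ULL	/* pretend HZ=1000 */
#define EX_KTIME_MAX	INT64_MAX

static uint64_t next_event_ns(uint64_t basem, unsigned long basej,
			      unsigned long nextevt, int timers_pending)
{
	if (!timers_pending)		/* nothing queued: no next event to report */
		return EX_KTIME_MAX;
	return basem + (uint64_t)(nextevt - basej) * EX_TICK_NSEC;
}

int main(void)
{
	/* next timer 5 jiffies away with HZ=1000 -> base time plus 5 ms */
	printf("%llu\n", (unsigned long long)next_event_ns(1000, 10, 15, 1));
	return 0;
}
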
@@ -5985,7 +5985,8 @@ ftrace_graph_release(struct inode *inode, struct file *file)
* infrastructure to do the synchronization, thus we must do it
* ourselves.
*/
synchronize_rcu_tasks_rude();
if (old_hash != EMPTY_HASH)
synchronize_rcu_tasks_rude();

free_ftrace_hash(old_hash);
}
@@ -7544,7 +7545,7 @@ int ftrace_is_dead(void)
*/
int register_ftrace_function(struct ftrace_ops *ops)
{
int ret = -1;
int ret;

ftrace_ops_init(ops);

@@ -3880,10 +3880,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
if (unlikely(!head))
return true;

return reader->read == rb_page_commit(reader) &&
(commit == reader ||
(commit == head &&
head->read == rb_page_commit(commit)));
/* Reader should exhaust content in reader page */
if (reader->read != rb_page_commit(reader))
return false;

/*
* If writers are committing on the reader page, knowing all
* committed content has been read, the ring buffer is empty.
*/
if (commit == reader)
return true;

/*
* If writers are committing on a page other than reader page
* and head page, there should always be content to read.
*/
if (commit != head)
return false;

/*
* Writers are committing on the head page, we just need
* to care about there're committed data, and the reader will
* swap reader page with head page when it is to read data.
*/
return rb_page_commit(commit) == 0;
}

/**

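The rb_per_cpu_empty() rewrite above spells the emptiness test out as four cases: unread data on the reader page, writers still on the reader page, writers on an intermediate page, and writers on the head page with nothing committed yet. A condensed model of the same decision, with the pages reduced to opaque handles and the commit/read offsets passed in directly:

#include <stdbool.h>

static bool ring_empty(const void *reader, const void *commit, const void *head,
		       unsigned long reader_read, unsigned long reader_commit,
		       unsigned long head_commit)
{
	if (reader_read != reader_commit)
		return false;		/* reader page still holds unread data */
	if (commit == reader)
		return true;		/* writers confined to the already-read reader page */
	if (commit != head)
		return false;		/* committed data sits on pages in between */
	return head_commit == 0;	/* head page is empty until something is committed */
}
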
@@ -5609,6 +5609,10 @@ static const char readme_msg[] =
"\t [:name=histname1]\n"
"\t [:<handler>.<action>]\n"
"\t [if <filter>]\n\n"
"\t Note, special fields can be used as well:\n"
"\t common_timestamp - to record current timestamp\n"
"\t common_cpu - to record the CPU the event happened on\n"
"\n"
"\t When a matching event is hit, an entry is added to a hash\n"
"\t table using the key(s) and value(s) named, and the value of a\n"
"\t sum called 'hitcount' is incremented. Keys and values\n"

@@ -1111,7 +1111,7 @@ static const char *hist_field_name(struct hist_field *field,
field->flags & HIST_FIELD_FL_ALIAS)
field_name = hist_field_name(field->operands[0], ++level);
else if (field->flags & HIST_FIELD_FL_CPU)
field_name = "cpu";
field_name = "common_cpu";
else if (field->flags & HIST_FIELD_FL_EXPR ||
field->flags & HIST_FIELD_FL_VAR_REF) {
if (field->system) {
@@ -1991,14 +1991,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
hist_data->enable_timestamps = true;
if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
hist_data->attrs->ts_in_usecs = true;
} else if (strcmp(field_name, "cpu") == 0)
} else if (strcmp(field_name, "common_cpu") == 0)
*flags |= HIST_FIELD_FL_CPU;
else {
field = trace_find_event_field(file->event_call, field_name);
if (!field || !field->size) {
hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name));
field = ERR_PTR(-EINVAL);
goto out;
/*
* For backward compatibility, if field_name
* was "cpu", then we treat this the same as
* common_cpu.
*/
if (strcmp(field_name, "cpu") == 0) {
*flags |= HIST_FIELD_FL_CPU;
} else {
hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
errpos(field_name));
field = ERR_PTR(-EINVAL);
goto out;
}
}
}
out:
@@ -5085,7 +5095,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
seq_printf(m, "%s=", hist_field->var.name);

if (hist_field->flags & HIST_FIELD_FL_CPU)
seq_puts(m, "cpu");
seq_puts(m, "common_cpu");
else if (field_name) {
if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
hist_field->flags & HIST_FIELD_FL_ALIAS)

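The trace_events_hist.c hunks above rename the synthetic histogram field "cpu" to "common_cpu" (matching the common_timestamp naming) while keeping "cpu" as a backward-compatible alias; in the actual change the alias only applies after trace_find_event_field() fails, so an event's own "cpu" field still takes precedence. The sketch below flattens that into one helper for brevity; the flag constant and return convention are stand-ins, not the tracing API:

#include <string.h>

#define EX_FL_CPU	(1 << 0)

static int parse_special_field(const char *field_name, unsigned int *flags)
{
	if (strcmp(field_name, "common_cpu") == 0 ||
	    strcmp(field_name, "cpu") == 0) {	/* "cpu" kept for existing triggers */
		*flags |= EX_FL_CPU;
		return 0;
	}
	return -1;	/* caller falls back to a real event field lookup */
}
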
@@ -893,15 +893,13 @@ static struct synth_event *alloc_synth_event(const char *name, int n_fields,
dyn_event_init(&event->devent, &synth_event_ops);

for (i = 0, j = 0; i < n_fields; i++) {
fields[i]->field_pos = i;
event->fields[i] = fields[i];

if (fields[i]->is_dynamic) {
event->dynamic_fields[j] = fields[i];
event->dynamic_fields[j]->field_pos = i;
if (fields[i]->is_dynamic)
event->dynamic_fields[j++] = fields[i];
event->n_dynamic_fields++;
}
}
event->n_dynamic_fields = j;
event->n_fields = n_fields;
out:
return event;

@@ -14,10 +14,10 @@ struct synth_field {
char *name;
size_t size;
unsigned int offset;
unsigned int field_pos;
bool is_signed;
bool is_string;
bool is_dynamic;
bool field_pos;
};

struct synth_event {

@@ -299,8 +299,8 @@ static int tracepoint_add_func(struct tracepoint *tp,
* a pointer to it. This array is referenced by __DO_TRACE from
* include/linux/tracepoint.h using rcu_dereference_sched().
*/
rcu_assign_pointer(tp->funcs, tp_funcs);
tracepoint_update_call(tp, tp_funcs, false);
rcu_assign_pointer(tp->funcs, tp_funcs);
static_key_enable(&tp->key);

release_probes(old);

@@ -3676,15 +3676,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
unbound_release_work);
struct workqueue_struct *wq = pwq->wq;
struct worker_pool *pool = pwq->pool;
bool is_last;
bool is_last = false;

if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
return;
/*
* when @pwq is not linked, it doesn't hold any reference to the
* @wq, and @wq is invalid to access.
*/
if (!list_empty(&pwq->pwqs_node)) {
if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
return;

mutex_lock(&wq->mutex);
list_del_rcu(&pwq->pwqs_node);
is_last = list_empty(&wq->pwqs);
mutex_unlock(&wq->mutex);
mutex_lock(&wq->mutex);
list_del_rcu(&pwq->pwqs_node);
is_last = list_empty(&wq->pwqs);
mutex_unlock(&wq->mutex);
}

mutex_lock(&wq_pool_mutex);
put_unbound_pool(pool);

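The workqueue.c hunk above addresses an invalid access: an unlinked pwq holds no reference that keeps its workqueue alive, so is_last now defaults to false and wq is only touched inside the "still linked" branch. A reduced model of that ordering, with stand-in types rather than the workqueue internals:

#include <stdbool.h>

struct ex_wq  { int nr_pwqs; };
struct ex_pwq { struct ex_wq *wq; bool linked; };

static bool ex_pwq_release(struct ex_pwq *pwq)
{
	bool is_last = false;

	if (pwq->linked) {			/* only here is pwq->wq known to be valid */
		pwq->linked = false;
		is_last = (--pwq->wq->nr_pwqs == 0);
	}
	return is_last;				/* caller tears down the wq only when true */
}
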