mirror of
https://github.com/torvalds/linux.git
synced 2024-11-22 20:22:09 +00:00
perf tools: Use dedicated non-atomic clear/set bit helpers
Use the dedicated non-atomic helpers for {clear,set}_bit() and their test variants, i.e. the double-underscore versions. Despite being defined in atomic.h, and despite the kernel versions being atomic in the kernel, tools' {clear,set}_bit() helpers aren't actually atomic. Move to the double-underscore versions so that the versions that are expected to be atomic (for kernel developers) can be made atomic without affecting users that don't want atomic operations. No functional change intended. Signed-off-by: Sean Christopherson <seanjc@google.com> Acked-by: Namhyung Kim <namhyung@kernel.org> Message-Id: <20221119013450.2643007-6-seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
7f2b47f22b
commit
75d7ba32f9
@ -70,7 +70,7 @@ static int do_for_each_set_bit(unsigned int num_bits)
|
||||
bitmap_zero(to_test, num_bits);
|
||||
skip = num_bits / set_bits;
|
||||
for (i = 0; i < num_bits; i += skip)
|
||||
set_bit(i, to_test);
|
||||
__set_bit(i, to_test);
|
||||
|
||||
for (i = 0; i < outer_iterations; i++) {
|
||||
old = accumulator;
|
||||
|
@ -230,7 +230,7 @@ static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
|
||||
"WARNING: no sample cpu value"))
|
||||
return;
|
||||
|
||||
set_bit(sample->cpu, c2c_he->cpuset);
|
||||
__set_bit(sample->cpu, c2c_he->cpuset);
|
||||
}
|
||||
|
||||
static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
|
||||
@ -247,7 +247,7 @@ static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
|
||||
if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
|
||||
return;
|
||||
|
||||
set_bit(node, c2c_he->nodeset);
|
||||
__set_bit(node, c2c_he->nodeset);
|
||||
|
||||
if (c2c_he->paddr != sample->phys_addr) {
|
||||
c2c_he->paddr_cnt++;
|
||||
@ -2318,7 +2318,7 @@ static int setup_nodes(struct perf_session *session)
|
||||
continue;
|
||||
|
||||
perf_cpu_map__for_each_cpu(cpu, idx, map) {
|
||||
set_bit(cpu.cpu, set);
|
||||
__set_bit(cpu.cpu, set);
|
||||
|
||||
if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
|
||||
return -EINVAL;
|
||||
|
@ -216,7 +216,7 @@ static struct kwork_atom *atom_new(struct perf_kwork *kwork,
|
||||
list_add_tail(&page->list, &kwork->atom_page_list);
|
||||
|
||||
found_atom:
|
||||
set_bit(i, page->bitmap);
|
||||
__set_bit(i, page->bitmap);
|
||||
atom->time = sample->time;
|
||||
atom->prev = NULL;
|
||||
atom->page_addr = page;
|
||||
@ -229,8 +229,8 @@ static void atom_free(struct kwork_atom *atom)
|
||||
if (atom->prev != NULL)
|
||||
atom_free(atom->prev);
|
||||
|
||||
clear_bit(atom->bit_inpage,
|
||||
((struct kwork_atom_page *)atom->page_addr)->bitmap);
|
||||
__clear_bit(atom->bit_inpage,
|
||||
((struct kwork_atom_page *)atom->page_addr)->bitmap);
|
||||
}
|
||||
|
||||
static void atom_del(struct kwork_atom *atom)
|
||||
|
@ -3555,7 +3555,7 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp
|
||||
/* Return ENODEV is input cpu is greater than max cpu */
|
||||
if ((unsigned long)cpu.cpu > mask->nbits)
|
||||
return -ENODEV;
|
||||
set_bit(cpu.cpu, mask->bits);
|
||||
__set_bit(cpu.cpu, mask->bits);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -3627,8 +3627,8 @@ static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map
|
||||
pr_debug("nr_threads: %d\n", rec->nr_threads);
|
||||
|
||||
for (t = 0; t < rec->nr_threads; t++) {
|
||||
set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
|
||||
set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
|
||||
__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
|
||||
__set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
|
||||
if (verbose) {
|
||||
pr_debug("thread_masks[%d]: ", t);
|
||||
mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
|
||||
|
@ -1573,7 +1573,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
|
||||
|
||||
if (sched->map.comp) {
|
||||
cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
|
||||
if (!test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
|
||||
if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
|
||||
sched->map.comp_cpus[cpus_nr++] = this_cpu;
|
||||
new_cpu = true;
|
||||
}
|
||||
|
@ -18,7 +18,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
|
||||
|
||||
if (map && bm) {
|
||||
for (i = 0; i < perf_cpu_map__nr(map); i++)
|
||||
set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
|
||||
__set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
|
||||
}
|
||||
|
||||
if (map)
|
||||
|
@ -33,7 +33,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
|
||||
int i;
|
||||
|
||||
perf_cpu_map__for_each_cpu(cpu, i, map)
|
||||
set_bit(cpu.cpu, bm);
|
||||
__set_bit(cpu.cpu, bm);
|
||||
}
|
||||
|
||||
if (map)
|
||||
|
@ -58,14 +58,14 @@ void affinity__set(struct affinity *a, int cpu)
|
||||
return;
|
||||
|
||||
a->changed = true;
|
||||
set_bit(cpu, a->sched_cpus);
|
||||
__set_bit(cpu, a->sched_cpus);
|
||||
/*
|
||||
* We ignore errors because affinity is just an optimization.
|
||||
* This could happen for example with isolated CPUs or cpusets.
|
||||
* In this case the IPIs inside the kernel's perf API still work.
|
||||
*/
|
||||
sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
|
||||
clear_bit(cpu, a->sched_cpus);
|
||||
__clear_bit(cpu, a->sched_cpus);
|
||||
}
|
||||
|
||||
static void __affinity__cleanup(struct affinity *a)
|
||||
|
@ -79,12 +79,12 @@ struct perf_file_attr {
|
||||
|
||||
void perf_header__set_feat(struct perf_header *header, int feat)
|
||||
{
|
||||
set_bit(feat, header->adds_features);
|
||||
__set_bit(feat, header->adds_features);
|
||||
}
|
||||
|
||||
void perf_header__clear_feat(struct perf_header *header, int feat)
|
||||
{
|
||||
clear_bit(feat, header->adds_features);
|
||||
__clear_bit(feat, header->adds_features);
|
||||
}
|
||||
|
||||
bool perf_header__has_feat(const struct perf_header *header, int feat)
|
||||
@ -1358,7 +1358,7 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
|
||||
rewinddir(dir);
|
||||
|
||||
for_each_memory(phys, dir) {
|
||||
set_bit(phys, n->set);
|
||||
__set_bit(phys, n->set);
|
||||
}
|
||||
|
||||
closedir(dir);
|
||||
@ -3952,7 +3952,7 @@ int perf_file_header__read(struct perf_file_header *header,
|
||||
|
||||
if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
|
||||
bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
|
||||
set_bit(HEADER_BUILD_ID, header->adds_features);
|
||||
__set_bit(HEADER_BUILD_ID, header->adds_features);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -111,7 +111,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, i
|
||||
pr_err("Failed to allocate node mask for mbind: error %m\n");
|
||||
return -1;
|
||||
}
|
||||
set_bit(node_index, node_mask);
|
||||
__set_bit(node_index, node_mask);
|
||||
if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
|
||||
pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
|
||||
data, data + mmap_len, node_index);
|
||||
@ -256,7 +256,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
|
||||
for (idx = 0; idx < nr_cpus; idx++) {
|
||||
cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
|
||||
if (cpu__get_node(cpu) == node)
|
||||
set_bit(cpu.cpu, mask->bits);
|
||||
__set_bit(cpu.cpu, mask->bits);
|
||||
}
|
||||
}
|
||||
|
||||
@ -270,7 +270,7 @@ static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *
|
||||
if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
|
||||
build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
|
||||
else if (mp->affinity == PERF_AFFINITY_CPU)
|
||||
set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
|
||||
__set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1513,7 +1513,7 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
|
||||
|
||||
memset(bits, 0, BITS_TO_BYTES(PERF_PMU_FORMAT_BITS));
|
||||
for (b = from; b <= to; b++)
|
||||
set_bit(b, bits);
|
||||
__set_bit(b, bits);
|
||||
}
|
||||
|
||||
void perf_pmu__del_formats(struct list_head *formats)
|
||||
|
@ -365,7 +365,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
|
||||
|
||||
sprintf(handler, "%s::%s", event->system, event->name);
|
||||
|
||||
if (!test_and_set_bit(event->id, events_defined))
|
||||
if (!__test_and_set_bit(event->id, events_defined))
|
||||
define_event_symbols(event, handler, event->print_fmt.args);
|
||||
|
||||
s = nsecs / NSEC_PER_SEC;
|
||||
|
@ -933,7 +933,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
|
||||
|
||||
sprintf(handler_name, "%s__%s", event->system, event->name);
|
||||
|
||||
if (!test_and_set_bit(event->id, events_defined))
|
||||
if (!__test_and_set_bit(event->id, events_defined))
|
||||
define_event_symbols(event, handler_name, event->print_fmt.args);
|
||||
|
||||
handler = get_handler(handler_name);
|
||||
|
@ -2748,7 +2748,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
|
||||
goto out_delete_map;
|
||||
}
|
||||
|
||||
set_bit(cpu.cpu, cpu_bitmap);
|
||||
__set_bit(cpu.cpu, cpu_bitmap);
|
||||
}
|
||||
|
||||
err = 0;
|
||||
|
@ -741,7 +741,7 @@ static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
|
||||
break;
|
||||
}
|
||||
|
||||
set_bit(c.cpu, cpumask_bits(b));
|
||||
__set_bit(c.cpu, cpumask_bits(b));
|
||||
}
|
||||
|
||||
perf_cpu_map__put(m);
|
||||
|
Loading…
Reference in New Issue
Block a user