perf mmap: Discard legacy interface for mmap read
Discards perf_mmap__read_backward() and perf_mmap__read_catchup(). No tools use them. There are still tools that use perf_mmap__read_forward(). Keep it, but add comments pointing to the new interface for future use. Signed-off-by: Kan Liang <kan.liang@intel.com> Acked-by: Jiri Olsa <jolsa@kernel.org> Cc: Andi Kleen <ak@linux.intel.com> Cc: Jin Yao <yao.jin@linux.intel.com> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Wang Nan <wangnan0@huawei.com> Link: http://lkml.kernel.org/r/1516310792-208685-11-git-send-email-kan.liang@intel.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
committed by
Arnaldo Carvalho de Melo
parent
600a7cfe88
commit
3effc2f165
@@ -63,6 +63,10 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
|
|||||||
return event;
|
return event;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* legacy interface for mmap read.
|
||||||
|
* Don't use it. Use perf_mmap__read_event().
|
||||||
|
*/
|
||||||
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
|
union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
|
||||||
{
|
{
|
||||||
u64 head;
|
u64 head;
|
||||||
@@ -78,41 +82,6 @@ union perf_event *perf_mmap__read_forward(struct perf_mmap *map)
|
|||||||
return perf_mmap__read(map, &map->prev, head);
|
return perf_mmap__read(map, &map->prev, head);
|
||||||
}
|
}
|
||||||
|
|
||||||
union perf_event *perf_mmap__read_backward(struct perf_mmap *map)
|
|
||||||
{
|
|
||||||
u64 head, end;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Check if event was unmapped due to a POLLHUP/POLLERR.
|
|
||||||
*/
|
|
||||||
if (!refcount_read(&map->refcnt))
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
head = perf_mmap__read_head(map);
|
|
||||||
if (!head)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* 'head' pointer starts from 0. Kernel minus sizeof(record) form
|
|
||||||
* it each time when kernel writes to it, so in fact 'head' is
|
|
||||||
* negative. 'end' pointer is made manually by adding the size of
|
|
||||||
* the ring buffer to 'head' pointer, means the validate data can
|
|
||||||
* read is the whole ring buffer. If 'end' is positive, the ring
|
|
||||||
* buffer has not fully filled, so we must adjust 'end' to 0.
|
|
||||||
*
|
|
||||||
* However, since both 'head' and 'end' is unsigned, we can't
|
|
||||||
* simply compare 'end' against 0. Here we compare '-head' and
|
|
||||||
* the size of the ring buffer, where -head is the number of bytes
|
|
||||||
* kernel write to the ring buffer.
|
|
||||||
*/
|
|
||||||
if (-head < (u64)(map->mask + 1))
|
|
||||||
end = 0;
|
|
||||||
else
|
|
||||||
end = head + map->mask + 1;
|
|
||||||
|
|
||||||
return perf_mmap__read(map, &map->prev, end);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Read event from ring buffer one by one.
|
* Read event from ring buffer one by one.
|
||||||
* Return one event for each call.
|
* Return one event for each call.
|
||||||
@@ -152,17 +121,6 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map,
|
|||||||
return event;
|
return event;
|
||||||
}
|
}
|
||||||
|
|
||||||
void perf_mmap__read_catchup(struct perf_mmap *map)
|
|
||||||
{
|
|
||||||
u64 head;
|
|
||||||
|
|
||||||
if (!refcount_read(&map->refcnt))
|
|
||||||
return;
|
|
||||||
|
|
||||||
head = perf_mmap__read_head(map);
|
|
||||||
map->prev = head;
|
|
||||||
}
|
|
||||||
|
|
||||||
static bool perf_mmap__empty(struct perf_mmap *map)
|
static bool perf_mmap__empty(struct perf_mmap *map)
|
||||||
{
|
{
|
||||||
return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
|
return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
|
||||||
|
|||||||
@@ -65,8 +65,6 @@ void perf_mmap__put(struct perf_mmap *map);
|
|||||||
|
|
||||||
void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
|
void perf_mmap__consume(struct perf_mmap *map, bool overwrite);
|
||||||
|
|
||||||
void perf_mmap__read_catchup(struct perf_mmap *md);
|
|
||||||
|
|
||||||
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
|
static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
|
||||||
{
|
{
|
||||||
struct perf_event_mmap_page *pc = mm->base;
|
struct perf_event_mmap_page *pc = mm->base;
|
||||||
@@ -87,7 +85,6 @@ static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
|
|||||||
}
|
}
|
||||||
|
|
||||||
union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
|
union perf_event *perf_mmap__read_forward(struct perf_mmap *map);
|
||||||
union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
|
|
||||||
|
|
||||||
union perf_event *perf_mmap__read_event(struct perf_mmap *map,
|
union perf_event *perf_mmap__read_event(struct perf_mmap *map,
|
||||||
bool overwrite,
|
bool overwrite,
|
||||||
|
|||||||
Reference in New Issue
Block a user