fscache, cachefiles: Remove the histogram stuff

Remove the histogram stuff as it's mostly going to be outdated.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/162431195953.2908479.16770977195634296638.stgit@warthog.procyon.org.uk/
Author: David Howells <dhowells@redhat.com>, 2021-05-12 09:40:19 +01:00
parent 884a76881f
commit 6ae9bd8bb0
15 changed files with 0 additions and 337 deletions

@@ -19,22 +19,3 @@ config CACHEFILES_DEBUG
caching on files module. If this is set, the debugging output may be
enabled by setting bits in /sys/module/cachefiles/parameters/debug or
by including a debugging specifier in /etc/cachefilesd.conf.
config CACHEFILES_HISTOGRAM
bool "Gather latency information on CacheFiles"
depends on CACHEFILES && PROC_FS
help
This option causes latency information to be gathered on CacheFiles
operation and exported through file:
/proc/fs/cachefiles/histogram
The generation of this histogram adds a certain amount of overhead to
execution as there are a number of points at which data is gathered,
and on a multi-CPU system these may be on cachelines that keep
bouncing between CPUs. On the other hand, the histogram may be
useful for debugging purposes. Saying 'N' here is recommended.
See Documentation/filesystems/caching/cachefiles.rst for more
information.
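
A note on the overhead the help text above warns about: each sample is recorded by clamping the elapsed time in jiffies to HZ - 1 and atomically incrementing one slot of a global HZ-entry array (the cachefiles_hist() helper removed from internal.h further down), so every timed lookup/mkdir/create hits shared cachelines. A stand-alone user-space sketch of the same bucketing; hist_record() and the HZ value are illustrative, not kernel API:

/* Stand-alone model of the bucketing done by the removed cachefiles_hist().
 * HZ here is an assumption for the example; in the kernel the array is a
 * global atomic_t[HZ] shared by every CPU, which is the cacheline-bouncing
 * cost the Kconfig help text describes.
 */
#include <stdatomic.h>
#include <stdio.h>

#define HZ 250                          /* assumed tick rate, illustrative only */

static atomic_uint histogram[HZ];       /* one bucket per jiffy of latency */

static void hist_record(unsigned long start_jif, unsigned long now_jif)
{
        unsigned long jif = now_jif - start_jif;        /* elapsed jiffies */

        if (jif >= HZ)                  /* a second or more lands in the last bucket */
                jif = HZ - 1;
        atomic_fetch_add(&histogram[jif], 1);           /* the per-sample atomic hit */
}

int main(void)
{
        hist_record(1000, 1003);        /* a 3-jiffy (12ms at HZ=250) operation */
        hist_record(2000, 2600);        /* 600 jiffies, clamped to bucket HZ - 1 */
        printf("bucket 3 = %u, bucket %d = %u\n",
               atomic_load(&histogram[3]), HZ - 1,
               atomic_load(&histogram[HZ - 1]));
        return 0;
}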

@@ -15,6 +15,4 @@ cachefiles-y := \
security.o \
xattr.o
cachefiles-$(CONFIG_CACHEFILES_HISTOGRAM) += proc.o
obj-$(CONFIG_CACHEFILES) := cachefiles.o

@@ -180,31 +180,6 @@ extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
struct dentry *dir, char *filename);
/*
* proc.c
*/
#ifdef CONFIG_CACHEFILES_HISTOGRAM
extern atomic_t cachefiles_lookup_histogram[HZ];
extern atomic_t cachefiles_mkdir_histogram[HZ];
extern atomic_t cachefiles_create_histogram[HZ];
extern int __init cachefiles_proc_init(void);
extern void cachefiles_proc_cleanup(void);
static inline
void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
{
unsigned long jif = jiffies - start_jif;
if (jif >= HZ)
jif = HZ - 1;
atomic_inc(&histogram[jif]);
}
#else
#define cachefiles_proc_init() (0)
#define cachefiles_proc_cleanup() do {} while (0)
#define cachefiles_hist(hist, start_jif) do {} while (0)
#endif
/*
* rdwr.c
*/

@@ -69,15 +69,9 @@ static int __init cachefiles_init(void)
goto error_object_jar;
}
ret = cachefiles_proc_init();
if (ret < 0)
goto error_proc;
pr_info("Loaded\n");
return 0;
error_proc:
kmem_cache_destroy(cachefiles_object_jar);
error_object_jar:
misc_deregister(&cachefiles_dev);
error_dev:
@@ -94,7 +88,6 @@ static void __exit cachefiles_exit(void)
{
pr_info("Unloading\n");
cachefiles_proc_cleanup();
kmem_cache_destroy(cachefiles_object_jar);
misc_deregister(&cachefiles_dev);
}

@@ -496,7 +496,6 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
struct dentry *dir, *next = NULL;
struct inode *inode;
struct path path;
unsigned long start;
const char *name;
int ret, nlen;
@@ -535,9 +534,7 @@ lookup_again:
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
start = jiffies;
next = lookup_one_len(name, dir, nlen);
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(next)) {
trace_cachefiles_lookup(object, next, NULL);
goto lookup_error;
@@ -568,9 +565,7 @@ lookup_again:
ret = security_path_mkdir(&path, next, 0);
if (ret < 0)
goto create_error;
start = jiffies;
ret = vfs_mkdir(&init_user_ns, d_inode(dir), next, 0);
cachefiles_hist(cachefiles_mkdir_histogram, start);
if (!key)
trace_cachefiles_mkdir(object, next, ret);
if (ret < 0)
@@ -604,10 +599,8 @@ lookup_again:
ret = security_path_mknod(&path, next, S_IFREG, 0);
if (ret < 0)
goto create_error;
start = jiffies;
ret = vfs_create(&init_user_ns, d_inode(dir), next,
S_IFREG, true);
cachefiles_hist(cachefiles_create_histogram, start);
trace_cachefiles_create(object, next, ret);
if (ret < 0)
goto create_error;
@@ -765,7 +758,6 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
const char *dirname)
{
struct dentry *subdir;
unsigned long start;
struct path path;
int ret;
@@ -775,9 +767,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
inode_lock(d_inode(dir));
retry:
start = jiffies;
subdir = lookup_one_len(dirname, dir, strlen(dirname));
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(subdir)) {
if (PTR_ERR(subdir) == -ENOMEM)
goto nomem_d_alloc;
@@ -876,7 +866,6 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
struct cachefiles_object *object;
struct rb_node *_n;
struct dentry *victim;
unsigned long start;
int ret;
//_enter(",%pd/,%s",
@@ -885,9 +874,7 @@ static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
/* look up the victim */
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
start = jiffies;
victim = lookup_one_len(filename, dir, strlen(filename));
cachefiles_hist(cachefiles_lookup_histogram, start);
if (IS_ERR(victim))
goto lookup_error;

@@ -1,114 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles statistics
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t cachefiles_lookup_histogram[HZ];
atomic_t cachefiles_mkdir_histogram[HZ];
atomic_t cachefiles_create_histogram[HZ];
/*
* display the latency histogram
*/
static int cachefiles_histogram_show(struct seq_file *m, void *v)
{
unsigned long index;
unsigned x, y, z, t;
switch ((unsigned long) v) {
case 1:
seq_puts(m, "JIFS SECS LOOKUPS MKDIRS CREATES\n");
return 0;
case 2:
seq_puts(m, "===== ===== ========= ========= =========\n");
return 0;
default:
index = (unsigned long) v - 3;
x = atomic_read(&cachefiles_lookup_histogram[index]);
y = atomic_read(&cachefiles_mkdir_histogram[index]);
z = atomic_read(&cachefiles_create_histogram[index]);
if (x == 0 && y == 0 && z == 0)
return 0;
t = (index * 1000) / HZ;
seq_printf(m, "%4lu 0.%03u %9u %9u %9u\n", index, t, x, y, z);
return 0;
}
}
/*
* set up the iterator to start reading from the first line
*/
static void *cachefiles_histogram_start(struct seq_file *m, loff_t *_pos)
{
if ((unsigned long long)*_pos >= HZ + 2)
return NULL;
if (*_pos == 0)
*_pos = 1;
return (void *)(unsigned long) *_pos;
}
/*
* move to the next line
*/
static void *cachefiles_histogram_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (unsigned long long)*pos > HZ + 2 ?
NULL : (void *)(unsigned long) *pos;
}
/*
* clean up after reading
*/
static void cachefiles_histogram_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations cachefiles_histogram_ops = {
.start = cachefiles_histogram_start,
.stop = cachefiles_histogram_stop,
.next = cachefiles_histogram_next,
.show = cachefiles_histogram_show,
};
/*
* initialise the /proc/fs/cachefiles/ directory
*/
int __init cachefiles_proc_init(void)
{
_enter("");
if (!proc_mkdir("fs/cachefiles", NULL))
goto error_dir;
if (!proc_create_seq("fs/cachefiles/histogram", S_IFREG | 0444, NULL,
&cachefiles_histogram_ops))
goto error_histogram;
_leave(" = 0");
return 0;
error_histogram:
remove_proc_entry("fs/cachefiles", NULL);
error_dir:
_leave(" = -ENOMEM");
return -ENOMEM;
}
/*
* clean up the /proc/fs/cachefiles/ directory
*/
void cachefiles_proc_cleanup(void)
{
remove_proc_entry("fs/cachefiles/histogram", NULL);
remove_proc_entry("fs/cachefiles", NULL);
}
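
For context, this is roughly how the file being deleted here was consumed from user space: skip the two header lines the seq_file emits at positions 1 and 2, then parse the "%4lu 0.%03u %9u %9u %9u" rows. A hypothetical reader, only meaningful on a kernel built with CONFIG_CACHEFILES_HISTOGRAM=y (i.e. before this commit):

/* Hypothetical consumer of /proc/fs/cachefiles/histogram; the column
 * layout ("JIFS SECS LOOKUPS MKDIRS CREATES") follows the seq_file
 * code removed above.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/fs/cachefiles/histogram", "r");
        char line[128], secs[16];
        unsigned long jifs, lookups, mkdirs, creates, total = 0;

        if (!f) {
                perror("/proc/fs/cachefiles/histogram");
                return 1;
        }
        /* Two header lines come first (seq positions 1 and 2). */
        if (!fgets(line, sizeof(line), f) || !fgets(line, sizeof(line), f)) {
                fclose(f);
                return 1;
        }
        /* One row per non-empty bucket: jiffies, seconds, three counters. */
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "%lu %15s %lu %lu %lu",
                           &jifs, secs, &lookups, &mkdirs, &creates) == 5)
                        total += lookups + mkdirs + creates;
        }
        printf("%lu samples recorded\n", total);
        fclose(f);
        return 0;
}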

@@ -29,23 +29,6 @@ config FSCACHE_STATS
See Documentation/filesystems/caching/fscache.rst for more information.
config FSCACHE_HISTOGRAM
bool "Gather latency information on local caching"
depends on FSCACHE && PROC_FS
help
This option causes latency information to be gathered on local
caching and exported through file:
/proc/fs/fscache/histogram
The generation of this histogram adds a certain amount of overhead to
execution as there are a number of points at which data is gathered,
and on a multi-CPU system these may be on cachelines that keep
bouncing between CPUs. On the other hand, the histogram may be
useful for debugging purposes. Saying 'N' here is recommended.
See Documentation/filesystems/caching/fscache.rst for more information.
config FSCACHE_DEBUG
bool "Debug FS-Cache"
depends on FSCACHE
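
A worked example of the rows in the /proc/fs/fscache/histogram file described in the help text above: buckets are one jiffy wide, and the SECS column in the show code is computed as index * 1000 / HZ, printed with "0.%03u". Assuming HZ=250 (illustrative only), bucket 3 is labelled 0.012 (3 * 1000 / 250 = 12 ms); and since the recording helpers clamp anything of a second or more to HZ - 1, the last possible row is bucket 249, labelled 0.996.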

@@ -16,7 +16,6 @@ fscache-y := \
fscache-$(CONFIG_PROC_FS) += proc.o
fscache-$(CONFIG_FSCACHE_STATS) += stats.o
fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
obj-$(CONFIG_FSCACHE) := fscache.o

@@ -1,87 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache latency histogram
*
* Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#define FSCACHE_DEBUG_LEVEL THREAD
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
atomic_t fscache_obj_instantiate_histogram[HZ];
atomic_t fscache_objs_histogram[HZ];
atomic_t fscache_ops_histogram[HZ];
atomic_t fscache_retrieval_delay_histogram[HZ];
atomic_t fscache_retrieval_histogram[HZ];
/*
* display the time-taken histogram
*/
static int fscache_histogram_show(struct seq_file *m, void *v)
{
unsigned long index;
unsigned n[5], t;
switch ((unsigned long) v) {
case 1:
seq_puts(m, "JIFS SECS OBJ INST OP RUNS OBJ RUNS RETRV DLY RETRIEVLS\n");
return 0;
case 2:
seq_puts(m, "===== ===== ========= ========= ========= ========= =========\n");
return 0;
default:
index = (unsigned long) v - 3;
n[0] = atomic_read(&fscache_obj_instantiate_histogram[index]);
n[1] = atomic_read(&fscache_ops_histogram[index]);
n[2] = atomic_read(&fscache_objs_histogram[index]);
n[3] = atomic_read(&fscache_retrieval_delay_histogram[index]);
n[4] = atomic_read(&fscache_retrieval_histogram[index]);
if (!(n[0] | n[1] | n[2] | n[3] | n[4]))
return 0;
t = (index * 1000) / HZ;
seq_printf(m, "%4lu 0.%03u %9u %9u %9u %9u %9u\n",
index, t, n[0], n[1], n[2], n[3], n[4]);
return 0;
}
}
/*
* set up the iterator to start reading from the first line
*/
static void *fscache_histogram_start(struct seq_file *m, loff_t *_pos)
{
if ((unsigned long long)*_pos >= HZ + 2)
return NULL;
if (*_pos == 0)
*_pos = 1;
return (void *)(unsigned long) *_pos;
}
/*
* move to the next line
*/
static void *fscache_histogram_next(struct seq_file *m, void *v, loff_t *pos)
{
(*pos)++;
return (unsigned long long)*pos > HZ + 2 ?
NULL : (void *)(unsigned long) *pos;
}
/*
* clean up after reading
*/
static void fscache_histogram_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations fscache_histogram_ops = {
.start = fscache_histogram_start,
.stop = fscache_histogram_stop,
.next = fscache_histogram_next,
.show = fscache_histogram_show,
};

@@ -63,30 +63,6 @@ extern void fscache_cookie_put(struct fscache_cookie *,
extern struct fscache_cookie fscache_fsdef_index;
extern struct fscache_cookie_def fscache_fsdef_netfs_def;
/*
* histogram.c
*/
#ifdef CONFIG_FSCACHE_HISTOGRAM
extern atomic_t fscache_obj_instantiate_histogram[HZ];
extern atomic_t fscache_objs_histogram[HZ];
extern atomic_t fscache_ops_histogram[HZ];
extern atomic_t fscache_retrieval_delay_histogram[HZ];
extern atomic_t fscache_retrieval_histogram[HZ];
static inline void fscache_hist(atomic_t histogram[], unsigned long start_jif)
{
unsigned long jif = jiffies - start_jif;
if (jif >= HZ)
jif = HZ - 1;
atomic_inc(&histogram[jif]);
}
extern const struct seq_operations fscache_histogram_ops;
#else
#define fscache_hist(hist, start_jif) do {} while (0)
#endif
/*
* main.c
*/

@@ -277,13 +277,10 @@ static void fscache_object_work_func(struct work_struct *work)
{
struct fscache_object *object =
container_of(work, struct fscache_object, work);
unsigned long start;
_enter("{OBJ%x}", object->debug_id);
start = jiffies;
fscache_object_sm_dispatcher(object);
fscache_hist(fscache_objs_histogram, start);
fscache_put_object(object, fscache_obj_put_work);
}
@@ -436,7 +433,6 @@ static const struct fscache_state *fscache_parent_ready(struct fscache_object *o
spin_lock(&parent->lock);
parent->n_ops++;
parent->n_obj_ops++;
object->lookup_jif = jiffies;
spin_unlock(&parent->lock);
_leave("");
@@ -596,7 +592,6 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
object->cache->ops->lookup_complete(object);
fscache_stat_d(&fscache_n_cop_lookup_complete);
fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
fscache_stat(&fscache_n_object_avail);
_leave("");

@@ -616,7 +616,6 @@ void fscache_op_work_func(struct work_struct *work)
{
struct fscache_operation *op =
container_of(work, struct fscache_operation, work);
unsigned long start;
_enter("{OBJ%x OP%x,%d}",
op->object->debug_id, op->debug_id, atomic_read(&op->usage));
@@ -624,9 +623,7 @@ void fscache_op_work_func(struct work_struct *work)
trace_fscache_op(op->object->cookie, op, fscache_op_work);
ASSERT(op->processor != NULL);
start = jiffies;
op->processor(op);
fscache_hist(fscache_ops_histogram, start);
fscache_put_operation(op);
_leave("");

@@ -289,7 +289,6 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op)
ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
atomic_read(&op->n_pages), ==, 0);
fscache_hist(fscache_retrieval_histogram, op->start_time);
if (op->context)
fscache_put_context(op->cookie, op->context);
@@ -324,7 +323,6 @@ struct fscache_retrieval *fscache_alloc_retrieval(
op->mapping = mapping;
op->end_io_func = end_io_func;
op->context = context;
op->start_time = jiffies;
INIT_LIST_HEAD(&op->to_do);
/* Pin the netfs read context in case we need to do the actual netfs
@@ -340,8 +338,6 @@ struct fscache_retrieval *fscache_alloc_retrieval(
*/
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
unsigned long jif;
_enter("");
if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
@@ -351,7 +347,6 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
fscache_stat(&fscache_n_retrievals_wait);
jif = jiffies;
if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
TASK_INTERRUPTIBLE) != 0) {
fscache_stat(&fscache_n_retrievals_intr);
@@ -362,7 +357,6 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
smp_rmb();
fscache_hist(fscache_retrieval_delay_histogram, jif);
_leave(" = 0 [dly]");
return 0;
}

@@ -31,12 +31,6 @@ int __init fscache_proc_init(void)
goto error_stats;
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
if (!proc_create_seq("fs/fscache/histogram", S_IFREG | 0444, NULL,
&fscache_histogram_ops))
goto error_histogram;
#endif
#ifdef CONFIG_FSCACHE_OBJECT_LIST
if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
&fscache_objlist_proc_ops))
@@ -49,10 +43,6 @@ int __init fscache_proc_init(void)
#ifdef CONFIG_FSCACHE_OBJECT_LIST
error_objects:
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
remove_proc_entry("fs/fscache/histogram", NULL);
error_histogram:
#endif
#ifdef CONFIG_FSCACHE_STATS
remove_proc_entry("fs/fscache/stats", NULL);
error_stats:
@@ -73,9 +63,6 @@ void fscache_proc_cleanup(void)
#ifdef CONFIG_FSCACHE_OBJECT_LIST
remove_proc_entry("fs/fscache/objects", NULL);
#endif
#ifdef CONFIG_FSCACHE_HISTOGRAM
remove_proc_entry("fs/fscache/histogram", NULL);
#endif
#ifdef CONFIG_FSCACHE_STATS
remove_proc_entry("fs/fscache/stats", NULL);
#endif

@@ -147,7 +147,6 @@ struct fscache_retrieval {
fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
void *context; /* netfs read context (pinned) */
struct list_head to_do; /* list of things to be done by the backend */
unsigned long start_time; /* time at which retrieval started */
atomic_t n_pages; /* number of pages to be retrieved */
};