Mirror of https://github.com/torvalds/linux.git (synced 2024-12-18 09:02:17 +00:00)
Commit 35219bc5c7
Merge tag 'vfs-6.12.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull netfs updates from Christian Brauner:
 "This contains the work to improve read/write performance for the new
  netfs library.

  The main performance enhancing changes are:

   - Define a structure, struct folio_queue, and a new iterator type,
     ITER_FOLIOQ, to hold a buffer as a replacement for ITER_XARRAY. See
     that patch for questions about naming and form.

     ITER_FOLIOQ is provided as a replacement for ITER_XARRAY. The
     problem with an xarray is that accessing it requires the use of a
     lock (typically the RCU read lock) - and this means that we can't
     supply iterate_and_advance() with a step function that might sleep
     (crypto for example) without having to drop the lock between pages.

     ITER_FOLIOQ is the iterator for a chain of folio_queue structs,
     where each folio_queue holds a small list of folios. A folio_queue
     struct is a simpler structure than xarray and is not subject to
     concurrent manipulation by the VM. folio_queue is used rather than
     a bvec[] as it can form lists of indefinite size, adding to one end
     and removing from the other on the fly.

   - Provide a copy_folio_from_iter() wrapper.

   - Make cifs RDMA support ITER_FOLIOQ.

   - Use folio queues in the write-side helpers instead of xarrays.

   - Add a function to reset the iterator in a subrequest.

   - Simplify the write-side helpers to use sheaves to skip gaps rather
     than trying to work out where gaps are.

   - In afs, make the read subrequests asynchronous, putting them into
     work items to allow the next patch to do progressive
     unlocking/reading.

   - Overhaul the read-side helpers to improve performance.

   - Fix the caching of a partial block at the end of a file.

   - Allow a store to be cancelled.

  Then some changes for cifs to make it use folio queues instead of
  xarrays for crypto bufferage:

   - Use raw iteration functions rather than manually coding iteration
     when hashing data.

   - Switch to using folio_queue for crypto buffers.

   - Remove the xarray bits.

  Make some adjustments to the /proc/fs/netfs/stats file such that:

   - All the netfs stats lines begin 'Netfs:' but change this to
     something a bit more useful.

   - Add a couple of stats counters to track the numbers of skips and
     waits on the per-inode writeback serialisation lock to make it
     easier to check for this as a source of performance loss.

  Miscellaneous work:

   - Ensure that the sb_writers lock is taken around
     vfs_{set,remove}xattr() in the cachefiles code.

   - Reduce the number of conditional branches in netfs_perform_write().

   - Move the CIFS_INO_MODIFIED_ATTR flag to the netfs_inode struct and
     remove cifs_post_modify().

   - Move the max_len/max_nr_segs members from netfs_io_subrequest to
     netfs_io_request as they're only needed for one subreq at a time.

   - Add an 'unknown' source value for tracing purposes.

   - Remove NETFS_COPY_TO_CACHE as it's no longer used.

   - Set the request work function up front at allocation time.

   - Use bh-disabling spinlocks for rreq->lock as cachefiles completion
     may be run from block-filesystem DIO completion in softirq context.
   - Remove fs/netfs/io.c"

* tag 'vfs-6.12.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (25 commits)
  docs: filesystems: corrected grammar of netfs page
  cifs: Don't support ITER_XARRAY
  cifs: Switch crypto buffer to use a folio_queue rather than an xarray
  cifs: Use iterate_and_advance*() routines directly for hashing
  netfs: Cancel dirty folios that have no storage destination
  cachefiles, netfs: Fix write to partial block at EOF
  netfs: Remove fs/netfs/io.c
  netfs: Speed up buffered reading
  afs: Make read subreqs async
  netfs: Simplify the writeback code
  netfs: Provide an iterator-reset function
  netfs: Use new folio_queue data type and iterator instead of xarray iter
  cifs: Provide the capability to extract from ITER_FOLIOQ to RDMA SGEs
  iov_iter: Provide copy_folio_from_iter()
  mm: Define struct folio_queue and ITER_FOLIOQ to handle a sequence of folios
  netfs: Use bh-disabling spinlocks for rreq->lock
  netfs: Set the request work function upon allocation
  netfs: Remove NETFS_COPY_TO_CACHE
  netfs: Reserve netfs_sreq_source 0 as unset/unknown
  netfs: Move max_len/max_nr_segs from netfs_io_subrequest to netfs_io_stream
  ...
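The folio_queue/ITER_FOLIOQ description in the pull message above is dense, so here is a minimal, self-contained sketch of the shape it describes: a chain of segments, each holding a small batch of folios, that can grow at one end and be consumed from the other without the locking an xarray walk would need. The names below (toy_folio_queue, FQ_SLOTS, fq_append, fq_pop_head) are illustrative only and are NOT the kernel's actual folio_queue API.

#include <stdlib.h>

struct folio;                           /* opaque stand-in for a kernel folio */

#define FQ_SLOTS 8                      /* assumption: a "small list" of folios per segment */

/* One segment of the chain: a small, fixed batch of folio pointers. */
struct toy_folio_queue {
        struct folio *slots[FQ_SLOTS];
        unsigned int nr;                /* slots used in this segment */
        struct toy_folio_queue *next;   /* towards the tail (append end) */
        struct toy_folio_queue *prev;   /* towards the head (consume end) */
};

/* Append a folio at the tail, adding a new segment when the tail is full;
 * returns the (possibly new) tail, or NULL on allocation failure. */
static struct toy_folio_queue *fq_append(struct toy_folio_queue *tail,
                                         struct folio *folio)
{
        if (tail->nr == FQ_SLOTS) {
                struct toy_folio_queue *seg = calloc(1, sizeof(*seg));

                if (!seg)
                        return NULL;
                seg->prev = tail;
                tail->next = seg;
                tail = seg;
        }
        tail->slots[tail->nr++] = folio;
        return tail;
}

/* Drop a fully-consumed segment from the head and return the new head. */
static struct toy_folio_queue *fq_pop_head(struct toy_folio_queue *head)
{
        struct toy_folio_queue *next = head->next;

        if (next)
                next->prev = NULL;
        free(head);
        return next;
}

In the series itself, an ITER_FOLIOQ iov_iter walks such a chain directly, which is what lets iterate_and_advance() be given step functions that may sleep (crypto, for example) without holding an RCU lock between pages.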
// SPDX-License-Identifier: GPL-2.0-or-later
/* Miscellaneous bits for the netfs support library.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>

MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);

unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");

static struct kmem_cache *netfs_request_slab;
static struct kmem_cache *netfs_subrequest_slab;
mempool_t netfs_request_pool;
mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
LIST_HEAD(netfs_io_requests);
DEFINE_SPINLOCK(netfs_proc_lock);

static const char *netfs_origins[nr__netfs_io_origin] = {
        [NETFS_READAHEAD]               = "RA",
        [NETFS_READPAGE]                = "RP",
        [NETFS_READ_GAPS]               = "RG",
        [NETFS_READ_FOR_WRITE]          = "RW",
        [NETFS_DIO_READ]                = "DR",
        [NETFS_WRITEBACK]               = "WB",
        [NETFS_WRITETHROUGH]            = "WT",
        [NETFS_UNBUFFERED_WRITE]        = "UW",
        [NETFS_DIO_WRITE]               = "DW",
        [NETFS_PGPRIV2_COPY_TO_CACHE]   = "2C",
};

/*
 * Generate a list of I/O requests in /proc/fs/netfs/requests
 */
static int netfs_requests_seq_show(struct seq_file *m, void *v)
{
        struct netfs_io_request *rreq;

        if (v == &netfs_io_requests) {
                seq_puts(m,
                         "REQUEST  OR REF FL ERR  OPS COVERAGE\n"
                         "======== == === == ==== === =========\n"
                         );
                return 0;
        }

        rreq = list_entry(v, struct netfs_io_request, proc_link);
        seq_printf(m,
                   "%08x %s %3d %2lx %4ld %3d @%04llx %llx/%llx",
                   rreq->debug_id,
                   netfs_origins[rreq->origin],
                   refcount_read(&rreq->ref),
                   rreq->flags,
                   rreq->error,
                   atomic_read(&rreq->nr_outstanding),
                   rreq->start, rreq->submitted, rreq->len);
        seq_putc(m, '\n');
        return 0;
}

static void *netfs_requests_seq_start(struct seq_file *m, loff_t *_pos)
        __acquires(rcu)
{
        rcu_read_lock();
        return seq_list_start_head(&netfs_io_requests, *_pos);
}

static void *netfs_requests_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
        return seq_list_next(v, &netfs_io_requests, _pos);
}

static void netfs_requests_seq_stop(struct seq_file *m, void *v)
        __releases(rcu)
{
        rcu_read_unlock();
}

static const struct seq_operations netfs_requests_seq_ops = {
        .start  = netfs_requests_seq_start,
        .next   = netfs_requests_seq_next,
        .stop   = netfs_requests_seq_stop,
        .show   = netfs_requests_seq_show,
};
#endif /* CONFIG_PROC_FS */

static int __init netfs_init(void)
{
        int ret = -ENOMEM;

        netfs_request_slab = kmem_cache_create("netfs_request",
                                               sizeof(struct netfs_io_request), 0,
                                               SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
                                               NULL);
        if (!netfs_request_slab)
                goto error_req;

        if (mempool_init_slab_pool(&netfs_request_pool, 100, netfs_request_slab) < 0)
                goto error_reqpool;

        netfs_subrequest_slab = kmem_cache_create("netfs_subrequest",
                                                  sizeof(struct netfs_io_subrequest), 0,
                                                  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
                                                  NULL);
        if (!netfs_subrequest_slab)
                goto error_subreq;

        if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0)
                goto error_subreqpool;

        if (!proc_mkdir("fs/netfs", NULL))
                goto error_proc;
        if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
                             &netfs_requests_seq_ops))
                goto error_procfile;
#ifdef CONFIG_FSCACHE_STATS
        if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
                                netfs_stats_show))
                goto error_procfile;
#endif

        ret = fscache_init();
        if (ret < 0)
                goto error_fscache;
        return 0;

error_fscache:
error_procfile:
        remove_proc_subtree("fs/netfs", NULL);
error_proc:
        mempool_exit(&netfs_subrequest_pool);
error_subreqpool:
        kmem_cache_destroy(netfs_subrequest_slab);
error_subreq:
        mempool_exit(&netfs_request_pool);
error_reqpool:
        kmem_cache_destroy(netfs_request_slab);
error_req:
        return ret;
}
fs_initcall(netfs_init);

static void __exit netfs_exit(void)
{
        fscache_exit();
        remove_proc_subtree("fs/netfs", NULL);
        mempool_exit(&netfs_subrequest_pool);
        kmem_cache_destroy(netfs_subrequest_slab);
        mempool_exit(&netfs_request_pool);
        kmem_cache_destroy(netfs_request_slab);
}
module_exit(netfs_exit);
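For completeness, here is a small userspace sketch of how the /proc interface created by netfs_init() above can be inspected; it simply dumps the table that netfs_requests_seq_show() formats. It assumes CONFIG_PROC_FS is enabled and the netfs core is built in or loaded, so that /proc/fs/netfs/requests exists.

#include <stdio.h>

int main(void)
{
        /* The path matches the proc_mkdir()/proc_create_seq() calls above. */
        FILE *f = fopen("/proc/fs/netfs/requests", "r");
        char line[256];

        if (!f) {
                perror("fopen /proc/fs/netfs/requests");
                return 1;
        }
        /* The first two lines are the column headers emitted by
         * netfs_requests_seq_show(); each further line describes one
         * in-flight netfs_io_request. */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}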