mirror of https://github.com/torvalds/linux.git (synced 2024-12-01 16:41:39 +00:00)
ee4cdf7ba8
Improve the efficiency of buffered reads in a number of ways:

 (1) Overhaul the algorithm in general so that it's a lot more compact and
     split the read submission code between buffered and unbuffered
     versions.  The unbuffered version can be vastly simplified.

 (2) Read-result collection is handed off to a work queue rather than
     being done in the I/O thread.  Multiple subrequests can be processed
     simultaneously.

 (3) When a subrequest is collected, any folios it fully spans are
     collected and "spare" data on either side is donated to either the
     previous or the next subrequest in the sequence (a toy sketch of this
     donation step follows below).

Notes:

 (*) Readahead expansion massively slows down fio, presumably because it
     causes a load of extra allocations, both folio and xarray, up front
     before RPC requests can be transmitted.

 (*) RDMA with cifs does appear to work, both with SIW and RXE.

 (*) PG_private_2-based reading and copy-to-cache is split out into its
     own file and altered to use folio_queue.  Note that the copy to the
     cache now creates a new write transaction against the cache and adds
     the folios to be copied into it.  This allows it to use part of the
     writeback I/O code.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-20-dhowells@redhat.com/ # v2
Signed-off-by: Christian Brauner <brauner@kernel.org>
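For illustration only, here is a toy model of the "spare data donation" described in point (3). Every name and field below (toy_subreq, prev_donated, TOY_FOLIO_SIZE, toy_collect) is hypothetical and not the kernel's actual netfs structure; it only sketches the bookkeeping idea under the assumption of fixed-size folios.

/*
 * Toy sketch: when a subrequest is collected, folios it fully spans can
 * be unlocked immediately; the partial folio at either edge is "donated"
 * to whichever neighbour owns the rest of that folio, so the neighbour
 * releases it when it in turn is collected.
 */
struct toy_subreq {
	unsigned long long start;	/* file offset this subrequest covers */
	unsigned long long len;
	unsigned long long prev_donated;	/* bytes handed to us by the previous subreq */
	unsigned long long next_donated;	/* bytes handed to us by the next subreq */
	struct toy_subreq *prev, *next;
};

#define TOY_FOLIO_SIZE 4096ULL

static void toy_collect(struct toy_subreq *s)
{
	unsigned long long start = s->start - s->prev_donated;
	unsigned long long end = s->start + s->len + s->next_donated;
	unsigned long long head = start % TOY_FOLIO_SIZE;	/* offset into the front folio */
	unsigned long long tail = end % TOY_FOLIO_SIZE;		/* spill into the back folio */

	if (head && s->prev)
		/* The previous subreq owns the front folio's beginning; give
		 * it our share so it can complete and release that folio. */
		s->prev->next_donated += TOY_FOLIO_SIZE - head;
	if (tail && s->next)
		/* The next subreq owns the back folio's remainder; push our
		 * partial tail forward to it. */
		s->next->prev_donated += tail;

	/* Folios wholly inside the remaining aligned range can be unlocked now. */
}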
93 lines
3.2 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/* Netfs support statistics
 *
 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#include <linux/export.h>
#include <linux/seq_file.h>
#include "internal.h"

/* Read-helper counters */
atomic_t netfs_n_rh_dio_read;
atomic_t netfs_n_rh_readahead;
atomic_t netfs_n_rh_read_folio;
atomic_t netfs_n_rh_rreq;
atomic_t netfs_n_rh_sreq;
atomic_t netfs_n_rh_download;
atomic_t netfs_n_rh_download_done;
atomic_t netfs_n_rh_download_failed;
atomic_t netfs_n_rh_download_instead;
atomic_t netfs_n_rh_read;
atomic_t netfs_n_rh_read_done;
atomic_t netfs_n_rh_read_failed;
atomic_t netfs_n_rh_zero;
atomic_t netfs_n_rh_short_read;
atomic_t netfs_n_rh_write;
atomic_t netfs_n_rh_write_begin;
atomic_t netfs_n_rh_write_done;
atomic_t netfs_n_rh_write_failed;
atomic_t netfs_n_rh_write_zskip;
/* Write-helper counters */
atomic_t netfs_n_wh_buffered_write;
atomic_t netfs_n_wh_writethrough;
atomic_t netfs_n_wh_dio_write;
atomic_t netfs_n_wh_writepages;
atomic_t netfs_n_wh_copy_to_cache;
atomic_t netfs_n_wh_wstream_conflict;
atomic_t netfs_n_wh_upload;
atomic_t netfs_n_wh_upload_done;
atomic_t netfs_n_wh_upload_failed;
atomic_t netfs_n_wh_write;
atomic_t netfs_n_wh_write_done;
atomic_t netfs_n_wh_write_failed;
/* Writeback-lock and object counters */
atomic_t netfs_n_wb_lock_skip;
atomic_t netfs_n_wb_lock_wait;
atomic_t netfs_n_folioq;

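/*
 * Editorial note: these counters are not bumped directly at call sites.
 * internal.h wraps them so the increments compile away when the stats
 * option is disabled -- roughly like the sketch below (see
 * fs/netfs/internal.h for the authoritative definitions):
 *
 *	#ifdef CONFIG_NETFS_STATS
 *	#define netfs_stat(x)	atomic_inc(x)
 *	#define netfs_stat_d(x)	atomic_dec(x)
 *	#else
 *	#define netfs_stat(x)	do {} while (0)
 *	#define netfs_stat_d(x)	do {} while (0)
 *	#endif
 *
 * A call site then reads e.g. netfs_stat(&netfs_n_rh_readahead).
 */
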
int netfs_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Reads  : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
		   atomic_read(&netfs_n_rh_dio_read),
		   atomic_read(&netfs_n_rh_readahead),
		   atomic_read(&netfs_n_rh_read_folio),
		   atomic_read(&netfs_n_rh_write_begin),
		   atomic_read(&netfs_n_rh_write_zskip));
	seq_printf(m, "Writes : BW=%u WT=%u DW=%u WP=%u 2C=%u\n",
		   atomic_read(&netfs_n_wh_buffered_write),
		   atomic_read(&netfs_n_wh_writethrough),
		   atomic_read(&netfs_n_wh_dio_write),
		   atomic_read(&netfs_n_wh_writepages),
		   atomic_read(&netfs_n_wh_copy_to_cache));
	seq_printf(m, "ZeroOps: ZR=%u sh=%u sk=%u\n",
		   atomic_read(&netfs_n_rh_zero),
		   atomic_read(&netfs_n_rh_short_read),
		   atomic_read(&netfs_n_rh_write_zskip));
	seq_printf(m, "DownOps: DL=%u ds=%u df=%u di=%u\n",
		   atomic_read(&netfs_n_rh_download),
		   atomic_read(&netfs_n_rh_download_done),
		   atomic_read(&netfs_n_rh_download_failed),
		   atomic_read(&netfs_n_rh_download_instead));
	seq_printf(m, "CaRdOps: RD=%u rs=%u rf=%u\n",
		   atomic_read(&netfs_n_rh_read),
		   atomic_read(&netfs_n_rh_read_done),
		   atomic_read(&netfs_n_rh_read_failed));
	seq_printf(m, "UpldOps: UL=%u us=%u uf=%u\n",
		   atomic_read(&netfs_n_wh_upload),
		   atomic_read(&netfs_n_wh_upload_done),
		   atomic_read(&netfs_n_wh_upload_failed));
	seq_printf(m, "CaWrOps: WR=%u ws=%u wf=%u\n",
		   atomic_read(&netfs_n_wh_write),
		   atomic_read(&netfs_n_wh_write_done),
		   atomic_read(&netfs_n_wh_write_failed));
	seq_printf(m, "Objs   : rr=%u sr=%u foq=%u wsc=%u\n",
		   atomic_read(&netfs_n_rh_rreq),
		   atomic_read(&netfs_n_rh_sreq),
		   atomic_read(&netfs_n_folioq),
		   atomic_read(&netfs_n_wh_wstream_conflict));
	seq_printf(m, "WbLock : skip=%u wait=%u\n",
		   atomic_read(&netfs_n_wb_lock_skip),
		   atomic_read(&netfs_n_wb_lock_wait));
	return fscache_stats_show(m);
}
EXPORT_SYMBOL(netfs_stats_show);
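For context, netfs_stats_show() is a standard seq_file show routine, so it can be exposed through procfs with the stock helpers. The sketch below is one plausible wiring, not the module's actual registration code (upstream does this in fs/netfs/main.c); the path, error handling, and the extern declaration are assumptions, while proc_mkdir() and proc_create_single() are real <linux/proc_fs.h> APIs.

#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Upstream would get this declaration from fs/netfs/internal.h. */
int netfs_stats_show(struct seq_file *m, void *v);

static int __init netfs_stats_proc_init(void)
{
	/* Create /proc/fs/netfs and a read-only "stats" file in it,
	 * rendered by netfs_stats_show() on each read. */
	if (!proc_mkdir("fs/netfs", NULL))
		return -ENOMEM;
	if (!proc_create_single("fs/netfs/stats", 0444, NULL,
				netfs_stats_show))
		return -ENOMEM;
	return 0;
}

Reading the file (e.g. cat /proc/fs/netfs/stats) then prints the Reads/Writes/ZeroOps/... lines built above, followed by the fscache statistics that the trailing fscache_stats_show(m) call appends to the same seq_file.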