From da6ef2dffe6056aad3435e6cf7c6471c2a62187c Mon Sep 17 00:00:00 2001
From: Baokun Li
Date: Thu, 29 Aug 2024 16:34:09 +0800
Subject: [PATCH 01/12] cachefiles: fix dentry leak in cachefiles_open_file()

A dentry leak may be caused when a lookup cookie and a cull are
concurrent:

            P1             |             P2
-----------------------------------------------------------
                             cachefiles_lookup_cookie
                               cachefiles_look_up_object
                                 lookup_one_positive_unlocked
                                 // get dentry
 cachefiles_cull
   inode->i_flags |= S_KERNEL_FILE;
                               cachefiles_open_file
                                 cachefiles_mark_inode_in_use
                                   __cachefiles_mark_inode_in_use
                                     can_use = false
                                     if (!(inode->i_flags & S_KERNEL_FILE))
                                         can_use = true
                                     return false
                                 return false
                                 // Returns an error but doesn't put dentry

After that the following WARNING will be triggered when the backend
folder is umounted:

==================================================================
BUG: Dentry 000000008ad87947{i=7a,n=Dx_1_1.img} still in use (1) [unmount of ext4 sda]
WARNING: CPU: 4 PID: 359261 at fs/dcache.c:1767 umount_check+0x5d/0x70
CPU: 4 PID: 359261 Comm: umount Not tainted 6.6.0-dirty #25
RIP: 0010:umount_check+0x5d/0x70
Call Trace:
 d_walk+0xda/0x2b0
 do_one_tree+0x20/0x40
 shrink_dcache_for_umount+0x2c/0x90
 generic_shutdown_super+0x20/0x160
 kill_block_super+0x1a/0x40
 ext4_kill_sb+0x22/0x40
 deactivate_locked_super+0x35/0x80
 cleanup_mnt+0x104/0x160
==================================================================

Whether cachefiles_open_file() returns true or false, the reference
count obtained by lookup_positive_unlocked() in
cachefiles_look_up_object() should be released. Therefore release that
reference count in cachefiles_look_up_object() to fix the above issue
and simplify the code.

Fixes: 1f08c925e7a3 ("cachefiles: Implement backing file wrangling")
Cc: stable@kernel.org
Signed-off-by: Baokun Li
Link: https://lore.kernel.org/r/20240829083409.3788142-1-libaokun@huaweicloud.com
Acked-by: David Howells
Signed-off-by: Christian Brauner
---
 fs/cachefiles/namei.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index f53977169db4..2b3f9935dbb4 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -595,14 +595,12 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
         * write and readdir but not lookup or open).
         */
        touch_atime(&file->f_path);
-       dput(dentry);
        return true;
 
 check_failed:
        fscache_cookie_lookup_negative(object->cookie);
        cachefiles_unmark_inode_in_use(object, file);
        fput(file);
-       dput(dentry);
        if (ret == -ESTALE)
                return cachefiles_create_file(object);
        return false;
@@ -611,7 +609,6 @@ error_fput:
        fput(file);
 error:
        cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
-       dput(dentry);
        return false;
 }
 
@@ -654,7 +651,9 @@ bool cachefiles_look_up_object(struct cachefiles_object *object)
                goto new_file;
        }
 
-       if (!cachefiles_open_file(object, dentry))
+       ret = cachefiles_open_file(object, dentry);
+       dput(dentry);
+       if (!ret)
                return false;
 
        _leave(" = t [%lu]", file_inode(object->file)->i_ino);

From 2cf36327ee1e47733aba96092d7bd082a4056ff5 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Sat, 14 Sep 2024 21:40:02 +0100
Subject: [PATCH 02/12] afs: Fix missing wire-up of afs_retry_request()

afs_retry_request() is supposed to be pointed to by the afs_req_ops
netfs operations table, but the pointer got lost somewhere. The
function is used during writeback to rotate through the authentication
keys that were in force when the file was modified locally.

Fix this by adding the pointer to the function.
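
To illustrate the shape of the fix (field names as in the diff below;
other fields of the table elided), the ops table ends up as:

        const struct netfs_request_ops afs_req_ops = {
                ...
                .begin_writeback        = afs_begin_writeback,
                .prepare_write          = afs_prepare_write,
                .issue_write            = afs_issue_write,
                .retry_request          = afs_retry_request,
        };

Without the .retry_request hook wired up, netfs writeback retries have
no way to call back into afs to rotate to an alternate key, so a retry
keeps using the key that just failed.
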
Fixes: 1ecb146f7cd8 ("netfs, afs: Use writeback retry to deal with alternate keys")
Reported-by: Dr. David Alan Gilbert
Signed-off-by: David Howells
Link: https://lore.kernel.org/r/1690847.1726346402@warthog.procyon.org.uk
cc: Marc Dionne
cc: Jeff Layton
cc: linux-afs@lists.infradead.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner
---
 fs/afs/file.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/fs/afs/file.c b/fs/afs/file.c
index 492d857a3fa0..6762eff97517 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -420,6 +420,7 @@ const struct netfs_request_ops afs_req_ops = {
        .begin_writeback        = afs_begin_writeback,
        .prepare_write          = afs_prepare_write,
        .issue_write            = afs_issue_write,
+       .retry_request          = afs_retry_request,
 };
 
 static void afs_add_open_mmap(struct afs_vnode *vnode)

From 8a46067783bdff222d1fb8f8c20e3b7b711e3ce5 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Thu, 26 Sep 2024 18:51:46 +0200
Subject: [PATCH 03/12] pidfs: check for valid pid namespace

When we access a non-current task's pid namespace we need to check that
the task hasn't been reaped in the meantime, making its pid namespace
inaccessible. The user namespace is fine because it is only released
when the last reference to struct task_struct is put and exit_creds()
is called.

Link: https://lore.kernel.org/r/20240926-klebt-altgedienten-0415ad4d273c@brauner
Fixes: 5b08bd408534 ("pidfs: allow retrieval of namespace file descriptors")
CC: stable@vger.kernel.org # v6.11
Signed-off-by: Christian Brauner
---
 fs/pidfs.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/fs/pidfs.c b/fs/pidfs.c
index 7ffdc88dfb52..80675b6bf884 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -120,6 +120,7 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        struct nsproxy *nsp __free(put_nsproxy) = NULL;
        struct pid *pid = pidfd_pid(file);
        struct ns_common *ns_common = NULL;
+       struct pid_namespace *pid_ns;
 
        if (arg)
                return -EINVAL;
@@ -202,7 +203,9 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case PIDFD_GET_PID_NAMESPACE:
                if (IS_ENABLED(CONFIG_PID_NS)) {
                        rcu_read_lock();
-                       ns_common = to_ns_common(get_pid_ns(task_active_pid_ns(task)));
+                       pid_ns = task_active_pid_ns(task);
+                       if (pid_ns)
+                               ns_common = to_ns_common(get_pid_ns(pid_ns));
                        rcu_read_unlock();
                }
                break;

From f94d54208f25dc93f3bcae9e725a582380a503b1 Mon Sep 17 00:00:00 2001
From: Marc Dionne
Date: Mon, 23 Sep 2024 16:07:49 +0100
Subject: [PATCH 04/12] afs: Fix possible infinite loop with unresponsive
 servers

A return code of 0 from afs_wait_for_one_fs_probe is an indication that
the endpoint state attached to the operation is stale and has been
superseded. In that case the iteration needs to be restarted so that
the newer probe result state gets used.

Failure to do so can result in a tight infinite loop around the
iterate_address label, where all addresses are thought to be responsive
and to have been tried, with nothing to refresh the endpoint state.
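
Illustratively, callers of afs_wait_for_one_fs_probe() now have to
distinguish three classes of outcome (a condensed sketch of the pattern
in fs/afs/rotate.c, not a verbatim excerpt):

        error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
                                          !(op->flags & AFS_OPERATION_UNINTR));
        switch (error) {
        case 1:         /* Some address responded: try the addresses again. */
                goto iterate_address;
        case 0:         /* Probe state superseded: restart with fresh state. */
                goto restart_from_beginning;
        default:        /* -ERESTARTSYS etc.: fail the operation. */
                afs_op_set_error(op, error);
                goto failed;
        }
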
Fixes: 495f2ae9e355 ("afs: Fix fileserver rotation")
Reported-by: Markus Suvanto
Link: https://lists.infradead.org/pipermail/linux-afs/2024-July/008628.html
cc: linux-afs@lists.infradead.org
Signed-off-by: Marc Dionne
Signed-off-by: David Howells
Link: https://lore.kernel.org/r/20240906134019.131553-1-marc.dionne@auristor.com/
Link: https://lore.kernel.org/r/20240923150756.902363-6-dhowells@redhat.com
Signed-off-by: Christian Brauner
---
 fs/afs/fs_probe.c |  4 ++--
 fs/afs/rotate.c   | 11 ++++++++---
 2 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 580de4adaaf6..b516d05b0fef 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -506,10 +506,10 @@ int afs_wait_for_one_fs_probe(struct afs_server *server, struct afs_endpoint_sta
        finish_wait(&server->probe_wq, &wait);
 
 dont_wait:
-       if (estate->responsive_set & ~exclude)
-               return 1;
        if (test_bit(AFS_ESTATE_SUPERSEDED, &estate->flags))
                return 0;
+       if (estate->responsive_set & ~exclude)
+               return 1;
        if (is_intr && signal_pending(current))
                return -ERESTARTSYS;
        if (timo == 0)
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index ed09d4d4c211..d612983d6f38 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -632,8 +632,10 @@ iterate_address:
 wait_for_more_probe_results:
        error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
                                          !(op->flags & AFS_OPERATION_UNINTR));
-       if (!error)
+       if (error == 1)
                goto iterate_address;
+       if (!error)
+               goto restart_from_beginning;
 
        /* We've now had a failure to respond on all of a server's addresses -
         * immediately probe them again and consider retrying the server.
@@ -644,10 +646,13 @@ wait_for_more_probe_results:
        error = afs_wait_for_one_fs_probe(op->server, op->estate, op->addr_tried,
                                          !(op->flags & AFS_OPERATION_UNINTR));
        switch (error) {
-       case 0:
+       case 1:
                op->flags &= ~AFS_OPERATION_RETRY_SERVER;
-               trace_afs_rotate(op, afs_rotate_trace_retry_server, 0);
+               trace_afs_rotate(op, afs_rotate_trace_retry_server, 1);
                goto retry_server;
+       case 0:
+               trace_afs_rotate(op, afs_rotate_trace_retry_server, 0);
+               goto restart_from_beginning;
        case -ERESTARTSYS:
                afs_op_set_error(op, error);
                goto failed;

From 19dcfb9c1685d45ba96852e021415134865b0a95 Mon Sep 17 00:00:00 2001
From: Thorsten Blum
Date: Mon, 23 Sep 2024 16:07:48 +0100
Subject: [PATCH 05/12] afs: Remove unused struct and function prototype

The struct afs_address_list and the function prototype
afs_put_address_list() are no longer used. Remove them.

Signed-off-by: Thorsten Blum
Signed-off-by: David Howells
Link: https://lore.kernel.org/r/20240911095046.3749-2-thorsten.blum@toblux.com/
Link: https://lore.kernel.org/r/20240923150756.902363-5-dhowells@redhat.com
cc: Marc Dionne
cc: linux-afs@lists.infradead.org
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner
---
 fs/afs/afs_vl.h | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index 9c65ffb8a523..a06296c8827d 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -134,13 +134,4 @@ struct afs_uvldbentry__xdr {
        __be32                  spares9;
 };
 
-struct afs_address_list {
-       refcount_t              usage;
-       unsigned int            version;
-       unsigned int            nr_addrs;
-       struct sockaddr_rxrpc   addrs[];
-};
-
-extern void afs_put_address_list(struct afs_address_list *alist);
-
 #endif /* AFS_VL_H */

From ff98751bae40faed1ba9c6a7287e84430f7dec64 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Mon, 23 Sep 2024 16:07:50 +0100
Subject: [PATCH 06/12] afs: Fix the setting of the server responding flag

In afs_wait_for_operation(), we transcribe the call-responded flag to
the server record that we used after doing the fileserver iteration
loop - but it's possible to exit the loop having had a response from
the server that we've discarded (e.g. it returned an abort or we
started receiving data, but the call didn't complete).

This means that op->server might be NULL, but we don't check that
before attempting to set the server flag.

Fixes: 98f9fda2057b ("afs: Fold the afs_addr_cursor struct in")
Signed-off-by: David Howells
Link: https://lore.kernel.org/r/20240923150756.902363-7-dhowells@redhat.com
cc: Marc Dionne
cc: linux-afs@lists.infradead.org
Signed-off-by: Christian Brauner
---
 fs/afs/fs_operation.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
index 3546b087e791..428721bbe4f6 100644
--- a/fs/afs/fs_operation.c
+++ b/fs/afs/fs_operation.c
@@ -201,7 +201,7 @@ void afs_wait_for_operation(struct afs_operation *op)
                }
        }
 
-       if (op->call_responded)
+       if (op->call_responded && op->server)
                set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags);
 
        if (!afs_op_error(op)) {

From 9fffa4e9b3b158f63334e603e610da7d529a0f9a Mon Sep 17 00:00:00 2001
From: David Howells
Date: Fri, 27 Sep 2024 09:08:42 +0100
Subject: [PATCH 07/12] netfs: Advance iterator correctly rather than jumping
 it

In netfs_write_folio(), use iov_iter_advance() to advance over the
folio as we split bits of it off to subrequests, rather than manually
jumping the ->iov_offset value around. This becomes more problematic
when we use a bounce buffer made out of single-page folios to cover a
multipage pagecache folio.
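
In outline, the change is (a sketch; in the real code iter_off tracks
how far the iterator has already been advanced for this folio):

        /* Before: teleport the iterator by poking its internals. */
        wreq->io_iter.iov_offset = stream->submit_off;

        /* After: advance it properly. */
        if (stream->submit_off > iter_off) {
                iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off);
                iter_off = stream->submit_off;
        }

Directly assigning ->iov_offset only works while the buffer is a single
contiguous region; iov_iter_advance() also steps the iterator across the
boundaries between the single-page folios making up a bounce buffer.
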
Signed-off-by: David Howells
Link: https://lore.kernel.org/r/2238548.1727424522@warthog.procyon.org.uk
cc: Jeff Layton
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner
---
 fs/netfs/write_issue.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 04e66d587f77..f9761d11791d 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -307,6 +307,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
        struct netfs_io_stream *stream;
        struct netfs_group *fgroup; /* TODO: Use this with ceph */
        struct netfs_folio *finfo;
+       size_t iter_off = 0;
        size_t fsize = folio_size(folio), flen = fsize, foff = 0;
        loff_t fpos = folio_pos(folio), i_size;
        bool to_eof = false, streamw = false;
@@ -462,7 +463,12 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
                if (choose_s < 0)
                        break;
                stream = &wreq->io_streams[choose_s];
-               wreq->io_iter.iov_offset = stream->submit_off;
+
+               /* Advance the iterator(s). */
+               if (stream->submit_off > iter_off) {
+                       iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off);
+                       iter_off = stream->submit_off;
+               }
 
                atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
                stream->submit_extendable_to = fsize - stream->submit_off;
@@ -477,8 +483,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
                debug = true;
        }
 
-       wreq->io_iter.iov_offset = 0;
-       iov_iter_advance(&wreq->io_iter, fsize);
+       if (fsize > iter_off)
+               iov_iter_advance(&wreq->io_iter, fsize - iter_off);
        atomic64_set(&wreq->issued_to, fpos + fsize);
 
        if (!debug)

From 28e8c5c095ec28edeedab5e976e62e0419a89fc1 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Mon, 30 Sep 2024 11:14:41 +0100
Subject: [PATCH 08/12] netfs: Add folio_queue API documentation

Add API documentation for folio_queue.

Signed-off-by: David Howells
Link: https://lore.kernel.org/r/2912369.1727691281@warthog.procyon.org.uk
cc: Jeff Layton
cc: netfs@lists.linux.dev
cc: linux-doc@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
Signed-off-by: Christian Brauner
---
 Documentation/core-api/folio_queue.rst | 212 +++++++++++++++++++++++++
 include/linux/folio_queue.h            | 168 ++++++++++++++++++++
 2 files changed, 380 insertions(+)
 create mode 100644 Documentation/core-api/folio_queue.rst

diff --git a/Documentation/core-api/folio_queue.rst b/Documentation/core-api/folio_queue.rst
new file mode 100644
index 000000000000..1fe7a9bc4b8d
--- /dev/null
+++ b/Documentation/core-api/folio_queue.rst
@@ -0,0 +1,212 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+===========
+Folio Queue
+===========
+
+:Author: David Howells
+
+.. Contents:
+
+ * Overview
+ * Initialisation
+ * Adding and removing folios
+ * Querying information about a folio
+ * Querying information about a folio_queue
+ * Folio marks
+ * Folio queue iteration
+ * Lockless simultaneous production/consumption issues
+
+
+Overview
+========
+
+The folio_queue struct forms a single segment in a segmented list of
+folios that can be used to form an I/O buffer.  As such, the list can be
+iterated over using the ITER_FOLIOQ iov_iter type.
+
+The publicly accessible members of the structure are::
+
+        struct folio_queue {
+                struct folio_queue *next;
+                struct folio_queue *prev;
+                ...
+        };
+
+A pair of pointers are provided, ``next`` and ``prev``, that point to the
+segments on either side of the segment being accessed.
+Whilst this is a doubly-linked list, it is intentionally not a circular
+list; the outward sibling pointers in terminal segments should be NULL.
+
+Each segment in the list also stores:
+
+ * an ordered sequence of folio pointers,
+ * the size of each folio and
+ * three 1-bit marks per folio,
+
+but these should not be accessed directly as the underlying data structure
+may change; the access functions outlined below should be used instead.
+
+The facility can be made accessible by::
+
+        #include <linux/folio_queue.h>
+
+and to use the iterator::
+
+        #include <linux/uio.h>
+
+
+Initialisation
+==============
+
+A segment should be initialised by calling::
+
+        void folioq_init(struct folio_queue *folioq);
+
+with a pointer to the segment to be initialised.  Note that this will not
+necessarily initialise all the folio pointers, so care must be taken to
+check the number of folios added.
+
+
+Adding and removing folios
+==========================
+
+Folios can be set in the next unused slot in a segment struct by calling
+one of::
+
+        unsigned int folioq_append(struct folio_queue *folioq,
+                                   struct folio *folio);
+
+        unsigned int folioq_append_mark(struct folio_queue *folioq,
+                                        struct folio *folio);
+
+Both functions update the stored folio count, store the folio and note its
+size.  The second function also sets the first mark for the folio added.
+Both functions return the number of the slot used.  [!] Note that no
+attempt is made to check that the capacity wasn't overrun and the list
+will not be extended automatically.
+
+A folio can be excised by calling::
+
+        void folioq_clear(struct folio_queue *folioq, unsigned int slot);
+
+This clears the slot in the array and also clears all the marks for that
+folio, but doesn't change the folio count - so future accesses of that
+slot must check if the slot is occupied.
+
+
+Querying information about a folio
+==================================
+
+Information about the folio in a particular slot may be queried by the
+following function::
+
+        struct folio *folioq_folio(const struct folio_queue *folioq,
+                                   unsigned int slot);
+
+If a folio has not yet been set in that slot, this may yield an undefined
+pointer.  The size of the folio in a slot may be queried with either of::
+
+        unsigned int folioq_folio_order(const struct folio_queue *folioq,
+                                        unsigned int slot);
+
+        size_t folioq_folio_size(const struct folio_queue *folioq,
+                                 unsigned int slot);
+
+The first function returns the size as an order and the second as a number
+of bytes.
+
+
+Querying information about a folio_queue
+========================================
+
+Information may be retrieved about a particular segment with the following
+functions::
+
+        unsigned int folioq_nr_slots(const struct folio_queue *folioq);
+
+        unsigned int folioq_count(struct folio_queue *folioq);
+
+        bool folioq_full(struct folio_queue *folioq);
+
+The first function returns the maximum capacity of a segment.  It must not
+be assumed that this won't vary between segments.  The second returns the
+number of folios added to a segment and the third is a shorthand to
+indicate if the segment has been filled to capacity.
+
+Note that the count and fullness are not affected by clearing folios from
+the segment.  These are more about indicating how many slots in the array
+have been initialised; it is assumed that slots won't get reused, but
+rather that the segment will get discarded as the queue is consumed.
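+
+For illustration, a minimal sketch of filling a segment using these query
+functions (the allocation strategy and get_next_folio() are hypothetical;
+error handling is elided)::
+
+        struct folio_queue *folioq;
+
+        folioq = kmalloc(sizeof(*folioq), GFP_KERNEL);
+        folioq_init(folioq);
+
+        while (!folioq_full(folioq)) {
+                struct folio *folio = get_next_folio();
+
+                folioq_append(folioq, folio);
+        }
+
+        /* folioq_count(folioq) == folioq_nr_slots(folioq) at this point. */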
+
+
+Folio marks
+===========
+
+Folios within a queue can also have marks assigned to them.  These marks
+can be used to note information such as if a folio needs folio_put()
+calling upon it.  There are three marks available to be set for each
+folio.
+
+The marks can be set by::
+
+        void folioq_mark(struct folio_queue *folioq, unsigned int slot);
+        void folioq_mark2(struct folio_queue *folioq, unsigned int slot);
+        void folioq_mark3(struct folio_queue *folioq, unsigned int slot);
+
+Cleared by::
+
+        void folioq_unmark(struct folio_queue *folioq, unsigned int slot);
+        void folioq_unmark2(struct folio_queue *folioq, unsigned int slot);
+        void folioq_unmark3(struct folio_queue *folioq, unsigned int slot);
+
+And the marks can be queried by::
+
+        bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot);
+        bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot);
+        bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot);
+
+The marks can be used for any purpose and are not interpreted by this API.
+
+
+Folio queue iteration
+=====================
+
+A list of segments may be iterated over using the I/O iterator facility
+using an ``iov_iter`` iterator of ``ITER_FOLIOQ`` type.  The iterator may
+be initialised with::
+
+        void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction,
+                                  const struct folio_queue *folioq,
+                                  unsigned int first_slot, unsigned int offset,
+                                  size_t count);
+
+This may be told to start at a particular segment, slot and offset within
+a queue.  The iov iterator functions will follow the ``next`` pointers
+when advancing and the ``prev`` pointers when reverting.
+
+
+Lockless simultaneous production/consumption issues
+===================================================
+
+If properly managed, the list can be extended by the producer at the head
+end and shortened by the consumer at the tail end simultaneously without
+the need to take locks.  The ITER_FOLIOQ iterator inserts appropriate
+barriers to aid with this.
+
+Care must be taken when simultaneously producing and consuming a list.  If
+the last segment is reached and the folios it refers to are entirely
+consumed by the IOV iterators, an iov_iter struct will be left pointing to
+the last segment with a slot number equal to the capacity of that segment.
+The iterator will try to continue on from this if there's another segment
+available when it is used again, but care must be taken lest the segment
+got removed and freed by the consumer before the iterator was advanced.
+
+It is recommended that the queue always contain at least one segment, even
+if that segment has never been filled or is entirely spent.  This prevents
+the head and tail pointers from collapsing.
+
+
+API Function Reference
+======================
+
+.. kernel-doc:: include/linux/folio_queue.h
diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
index 955680c3bb5f..af871405ae55 100644
--- a/include/linux/folio_queue.h
+++ b/include/linux/folio_queue.h
@@ -3,6 +3,12 @@
  *
  * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
+ *
+ * See:
+ *
+ *     Documentation/core-api/folio_queue.rst
+ *
+ * for a description of the API.
  */
 
 #ifndef _LINUX_FOLIO_QUEUE_H
@@ -33,6 +39,13 @@ struct folio_queue {
 #endif
 };
 
+/**
+ * folioq_init - Initialise a folio queue segment
+ * @folioq: The segment to initialise
+ *
+ * Initialise a folio queue segment.  Note that the folio pointers are
+ * left uninitialised.
+ */
 static inline void folioq_init(struct folio_queue *folioq)
 {
        folio_batch_init(&folioq->vec);
@@ -43,62 +56,155 @@ static inline void folioq_init(struct folio_queue *folioq)
        folioq->marks3 = 0;
 }
 
+/**
+ * folioq_nr_slots: Query the capacity of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that a particular folio queue segment might hold.
+ * [!] NOTE: This must not be assumed to be the same for every segment!
+ */
 static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq)
 {
        return PAGEVEC_SIZE;
 }
 
+/**
+ * folioq_count: Query the occupancy of a folio queue segment
+ * @folioq: The segment to query
+ *
+ * Query the number of folios that have been added to a folio queue segment.
+ * Note that this is not decreased as folios are removed from a segment.
+ */
 static inline unsigned int folioq_count(struct folio_queue *folioq)
 {
        return folio_batch_count(&folioq->vec);
 }
 
+/**
+ * folioq_full: Query if a folio queue segment is full
+ * @folioq: The segment to query
+ *
+ * Query if a folio queue segment is fully occupied.  Note that this does not
+ * change if folios are removed from a segment.
+ */
 static inline bool folioq_full(struct folio_queue *folioq)
 {
        //return !folio_batch_space(&folioq->vec);
        return folioq_count(folioq) >= folioq_nr_slots(folioq);
 }
 
+/**
+ * folioq_is_marked: Check first folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the first mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
 static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot)
 {
        return test_bit(slot, &folioq->marks);
 }
 
+/**
+ * folioq_mark: Set the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
 static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot)
 {
        set_bit(slot, &folioq->marks);
 }
 
+/**
+ * folioq_unmark: Clear the first mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the first mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
 static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot)
 {
        clear_bit(slot, &folioq->marks);
 }
 
+/**
+ * folioq_is_marked2: Check second folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the second mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
 static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot)
 {
        return test_bit(slot, &folioq->marks2);
 }
 
+/**
+ * folioq_mark2: Set the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
 static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot)
 {
        set_bit(slot, &folioq->marks2);
 }
 
+/**
+ * folioq_unmark2: Clear the second mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the second mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
 static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot)
 {
        clear_bit(slot, &folioq->marks2);
 }
 
+/**
+ * folioq_is_marked3: Check third folio mark in a folio queue segment
+ * @folioq: The segment to query
+ * @slot: The slot number of the folio to query
+ *
+ * Determine if the third mark is set for the folio in the specified slot in a
+ * folio queue segment.
+ */
 static inline bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot)
 {
        return test_bit(slot, &folioq->marks3);
 }
 
+/**
+ * folioq_mark3: Set the third mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Set the third mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
 static inline void folioq_mark3(struct folio_queue *folioq, unsigned int slot)
 {
        set_bit(slot, &folioq->marks3);
 }
 
+/**
+ * folioq_unmark3: Clear the third mark on a folio in a folio queue segment
+ * @folioq: The segment to modify
+ * @slot: The slot number of the folio to modify
+ *
+ * Clear the third mark for the folio in the specified slot in a folio queue
+ * segment.
+ */
 static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot)
 {
        clear_bit(slot, &folioq->marks3);
@@ -111,6 +217,19 @@ static inline unsigned int __folio_order(struct folio *folio)
        return folio->_flags_1 & 0xff;
 }
 
+/**
+ * folioq_append: Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue and the marks are left
+ * unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
 static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio)
 {
        unsigned int slot = folioq->vec.nr++;
@@ -120,6 +239,19 @@ static inline unsigned int folioq_append(struct folio_queue *folioq, struct foli
        return slot;
 }
 
+/**
+ * folioq_append_mark: Add a folio to a folio queue segment
+ * @folioq: The segment to add to
+ * @folio: The folio to add
+ *
+ * Add a folio to the tail of the sequence in a folio queue segment, increasing
+ * the occupancy count and returning the slot number for the folio just added.
+ * The folio size is extracted and stored in the queue, the first mark is set
+ * and the second and third marks are left unmodified.
+ *
+ * Note that it's left up to the caller to check that the segment capacity will
+ * not be exceeded and to extend the queue.
+ */
 static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio)
 {
        unsigned int slot = folioq->vec.nr++;
@@ -130,21 +262,57 @@ static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct
        return slot;
 }
 
+/**
+ * folioq_folio: Get a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the folio in the specified slot from a folio queue segment.  Note
+ * that no bounds check is made and if the slot hasn't been added into yet, the
+ * pointer will be undefined.  If the slot has been cleared, NULL will be
+ * returned.
+ */
 static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot)
 {
        return folioq->vec.folios[slot];
 }
 
+/**
+ * folioq_folio_order: Get the order of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the order of the folio in the specified slot from a folio queue
+ * segment.  Note that no bounds check is made and if the slot hasn't been
+ * added into yet, the order returned will be 0.
+ */
 static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot)
 {
        return folioq->orders[slot];
 }
 
+/**
+ * folioq_folio_size: Get the size of a folio from a folio queue segment
+ * @folioq: The segment to access
+ * @slot: The folio slot to access
+ *
+ * Retrieve the size of the folio in the specified slot from a folio queue
+ * segment.  Note that no bounds check is made and if the slot hasn't been
+ * added into yet, the size returned will be PAGE_SIZE.
+ */
 static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot)
 {
        return PAGE_SIZE << folioq_folio_order(folioq, slot);
 }
 
+/**
+ * folioq_clear: Clear a folio from a folio queue segment
+ * @folioq: The segment to clear
+ * @slot: The folio slot to clear
+ *
+ * Clear a folio from a sequence in a folio queue segment and clear its marks.
+ * The occupancy count is left unchanged.
+ */
 static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot)
 {
        folioq->vec.folios[slot] = NULL;

From f801850bc263d7fa0a4e6d9a36cddf4966c79c14 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Mon, 30 Sep 2024 12:59:16 +0100
Subject: [PATCH 09/12] netfs: Fix the netfs_folio tracepoint to handle NULL
 mapping

Fix the netfs_folio tracepoint to handle folios that have a NULL mapping
pointer.  In such a case, just substitute a zero inode number.

Fixes: c38f4e96e605 ("netfs: Provide func to copy data to pagecache for buffered write")
Signed-off-by: David Howells
Link: https://lore.kernel.org/r/2917423.1727697556@warthog.procyon.org.uk
cc: Jeff Layton
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner
---
 include/trace/events/netfs.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 76bd42a96815..1d7c52821e55 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -448,7 +448,8 @@ TRACE_EVENT(netfs_folio,
            ),
 
            TP_fast_assign(
-                   __entry->ino = folio->mapping->host->i_ino;
+                   struct address_space *__m = READ_ONCE(folio->mapping);
+                   __entry->ino = __m ? __m->host->i_ino : 0;
                    __entry->why = why;
                    __entry->index = folio_index(folio);
                    __entry->nr = folio_nr_pages(folio);

From fcd4904e2f6908d5c255fa5818bcf8ad32a6f0e8 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Sat, 5 Oct 2024 19:23:03 +0100
Subject: [PATCH 10/12] netfs: Remove call to folio_index()

Calling folio_index() is pointless overhead; directly dereferencing
folio->index is fine.
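
For context, folio_index() (in include/linux/pagemap.h) only differs
from a plain dereference for swap cache folios; it is roughly (a
simplified sketch - the name of the swap cache helper varies between
kernel versions):

        static inline pgoff_t folio_index(struct folio *folio)
        {
                if (unlikely(folio_test_swapcache(folio)))
                        return swap_cache_index(folio); /* swap cache case */
                return folio->index;
        }

The folios seen by this tracepoint come from the pagecache, not the swap
cache, so folio->index is always the right value here.
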
Signed-off-by: Matthew Wilcox (Oracle)
Link: https://lore.kernel.org/r/20241005182307.3190401-2-willy@infradead.org
Signed-off-by: Christian Brauner
---
 include/trace/events/netfs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 1d7c52821e55..72a208fd4496 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -451,7 +451,7 @@ TRACE_EVENT(netfs_folio,
                    struct address_space *__m = READ_ONCE(folio->mapping);
                    __entry->ino = __m ? __m->host->i_ino : 0;
                    __entry->why = why;
-                   __entry->index = folio_index(folio);
+                   __entry->index = folio->index;
                    __entry->nr = folio_nr_pages(folio);
            ),
 

From c6a90fe7f080d71271b723490454cfda1f81e4b0 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Sat, 5 Oct 2024 19:23:04 +0100
Subject: [PATCH 11/12] netfs: Fix a few minor bugs in netfs_page_mkwrite()

We can't return with VM_FAULT_SIGBUS | VM_FAULT_LOCKED; the core code
will not unlock the folio in this instance.  Introduce a new "unlock"
error exit to handle this case.  Use it to handle the "folio is
truncated" check, and change the "writeback interrupted by a fatal
signal" path to do a NOPAGE exit instead of letting the core code
install the folio currently under writeback before killing the process.
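
For clarity, the resulting control flow is roughly (condensed from the
diff below):

        if (folio->mapping != mapping)
                goto unlock;
        if (folio_wait_writeback_killable(folio) < 0)
                goto unlock;
        ...
out:
        sb_end_pagefault(inode->i_sb);
        return ret;
unlock:
        folio_unlock(folio);
        goto out;

so every early failure path now drops the folio lock itself rather than
relying on the fault core to do it.
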
Signed-off-by: Matthew Wilcox (Oracle)
Link: https://lore.kernel.org/r/20241005182307.3190401-3-willy@infradead.org
Signed-off-by: Christian Brauner
---
 fs/netfs/buffered_write.c | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index b3910dfcb56d..ff2814da88b1 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -491,7 +491,9 @@ EXPORT_SYMBOL(netfs_file_write_iter);
 
 /*
  * Notification that a previously read-only page is about to become writable.
- * Note that the caller indicates a single page of a multipage folio.
+ * The caller indicates the precise page that needs to be written to, but
+ * we only track group on a per-folio basis, so we block more often than
+ * we might otherwise.
  */
 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
 {
@@ -501,7 +503,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file_inode(file);
        struct netfs_inode *ictx = netfs_inode(inode);
-       vm_fault_t ret = VM_FAULT_RETRY;
+       vm_fault_t ret = VM_FAULT_NOPAGE;
        int err;
 
        _enter("%lx", folio->index);
@@ -510,21 +512,15 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
        if (folio_lock_killable(folio) < 0)
                goto out;
 
-       if (folio->mapping != mapping) {
-               folio_unlock(folio);
-               ret = VM_FAULT_NOPAGE;
-               goto out;
-       }
-
-       if (folio_wait_writeback_killable(folio)) {
-               ret = VM_FAULT_LOCKED;
-               goto out;
-       }
+       if (folio->mapping != mapping)
+               goto unlock;
+       if (folio_wait_writeback_killable(folio) < 0)
+               goto unlock;
 
        /* Can we see a streaming write here? */
        if (WARN_ON(!folio_test_uptodate(folio))) {
-               ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
-               goto out;
+               ret = VM_FAULT_SIGBUS;
+               goto unlock;
        }
 
        group = netfs_folio_group(folio);
@@ -559,5 +555,8 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 out:
        sb_end_pagefault(inode->i_sb);
        return ret;
+unlock:
+       folio_unlock(folio);
+       goto out;
 }
 EXPORT_SYMBOL(netfs_page_mkwrite);

From e995e8b600260cff3cfaf2607a62be8bdc4aa9c7 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Sat, 5 Oct 2024 19:23:05 +0100
Subject: [PATCH 12/12] netfs: Remove unnecessary references to pages

These places should all use folios instead of pages.
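
For reference, the folio zeroing helpers are thin wrappers around the
same underlying code (a sketch of the include/linux/highmem.h definition
around this kernel version; exact details may differ):

        static inline void folio_zero_segment(struct folio *folio,
                                              size_t start, size_t xend)
        {
                zero_user_segments(&folio->page, start, xend, 0, 0);
        }

so the conversions below are type-level cleanups with no behavioural
change.
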
Signed-off-by: Matthew Wilcox (Oracle)
Link: https://lore.kernel.org/r/20241005182307.3190401-4-willy@infradead.org
Signed-off-by: Christian Brauner
---
 fs/netfs/buffered_read.c  |  8 ++++----
 fs/netfs/buffered_write.c | 14 +++++++-------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index c40e226053cc..17aaec00002b 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -646,7 +646,7 @@ static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
        if (unlikely(always_fill)) {
                if (pos - offset + len <= i_size)
                        return false; /* Page entirely before EOF */
-               zero_user_segment(&folio->page, 0, plen);
+               folio_zero_segment(folio, 0, plen);
                folio_mark_uptodate(folio);
                return true;
        }
@@ -665,7 +665,7 @@ static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len,
        return false;
 
 zero_out:
-       zero_user_segments(&folio->page, 0, offset, offset + len, plen);
+       folio_zero_segments(folio, 0, offset, offset + len, plen);
        return true;
 }
 
@@ -732,7 +732,7 @@ retry:
        if (folio_test_uptodate(folio))
                goto have_folio;
 
-       /* If the page is beyond the EOF, we want to clear it - unless it's
+       /* If the folio is beyond the EOF, we want to clear it - unless it's
         * within the cache granule containing the EOF, in which case we need
         * to preload the granule.
         */
@@ -792,7 +792,7 @@ error:
 EXPORT_SYMBOL(netfs_write_begin);
 
 /*
- * Preload the data into a page we're proposing to write into.
+ * Preload the data into a folio we're proposing to write into.
  */
 int netfs_prefetch_for_write(struct file *file, struct folio *folio,
                             size_t offset, size_t len)
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index ff2814da88b1..b4826360a411 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -83,13 +83,13 @@ static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
  * netfs_perform_write - Copy data into the pagecache.
  * @iocb: The operation parameters
  * @iter: The source buffer
- * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
+ * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
  *
- * Copy data into pagecache pages attached to the inode specified by @iocb.
+ * Copy data into pagecache folios attached to the inode specified by @iocb.
  * The caller must hold appropriate inode locks.
  *
- * Dirty pages are tagged with a netfs_folio struct if they're not up to date
- * to indicate the range modified.  Dirty pages may also be tagged with a
+ * Dirty folios are tagged with a netfs_folio struct if they're not up to date
+ * to indicate the range modified.  Dirty folios may also be tagged with a
  * netfs-specific grouping such that data from an old group gets flushed before
  * a new one is started.
  */
@@ -223,11 +223,11 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
                 * we try to read it.
                 */
                if (fpos >= ctx->zero_point) {
-                       zero_user_segment(&folio->page, 0, offset);
+                       folio_zero_segment(folio, 0, offset);
                        copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
                        if (unlikely(copied == 0))
                                goto copy_failed;
-                       zero_user_segment(&folio->page, offset + copied, flen);
+                       folio_zero_segment(folio, offset + copied, flen);
                        __netfs_set_group(folio, netfs_group);
                        folio_mark_uptodate(folio);
                        trace_netfs_folio(folio, netfs_modify_and_clear);
@@ -407,7 +407,7 @@ EXPORT_SYMBOL(netfs_perform_write);
  * netfs_buffered_write_iter_locked - write data to a file
  * @iocb: IO state structure (file, offset, etc.)
  * @from: iov_iter with data to write
- * @netfs_group: Grouping for dirty pages (eg. ceph snaps).
+ * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
  *
  * This function does all the work needed for actually writing data to a
  * file.  It does all basic checks, removes SUID from the file, updates