// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level buffered write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include "internal.h"

static void __netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	if (netfs_group)
		folio_attach_private(folio, netfs_get_group(netfs_group));
}

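/*
 * Attach the dirty group to a modified folio.  If the folio has no private
 * data, or only carries the copy-to-cache marker, attach the group (taking a
 * ref on it); if no group is given and only the marker is present, clear it.
 */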
static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	void *priv = folio_get_private(folio);

	if (unlikely(priv != netfs_group)) {
		if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
			folio_attach_private(folio, netfs_get_group(netfs_group));
		else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
			folio_detach_private(folio);
	}
}

/*
 * Grab a folio for writing and lock it.  Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
 */
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}

/*
 * Update i_size and estimate the update to i_blocks to reflect the additional
 * data written into the pagecache until we can find out from the server what
 * the values actually are.
 */
static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
				loff_t i_size, loff_t pos, size_t copied)
{
	blkcnt_t add;
	size_t gap;

	if (ctx->ops->update_i_size) {
		ctx->ops->update_i_size(inode, pos);
		return;
	}

	i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
	fscache_update_cookie(ctx->cache, NULL, &pos);
#endif

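	/* Estimate the extra allocation: only bytes written beyond the space
	 * left in the sector containing the old EOF add new sectors, and the
	 * total is capped at the number of sectors up to the new position.
	 */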
	gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
	if (copied > gap) {
		add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);

		inode->i_blocks = min_t(blkcnt_t,
					DIV_ROUND_UP(pos, SECTOR_SIZE),
					inode->i_blocks + add);
	}
}

/**
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
 *
 * Copy data into pagecache folios attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 *
 * Dirty folios are tagged with a netfs_folio struct if they're not up to date
 * to indicate the range modified.  Dirty folios may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
 */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.for_sync	= true,
		.nr_to_write	= LONG_MAX,
		.range_start	= iocb->ki_pos,
		.range_end	= iocb->ki_pos + iter->count,
	};
	struct netfs_io_request *wreq = NULL;
	struct folio *folio = NULL, *writethrough = NULL;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
	ssize_t written = 0, ret, ret2;
	loff_t i_size, pos = iocb->ki_pos;
	size_t max_chunk = mapping_max_folio_size(mapping);
	bool maybe_trouble = false;

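	/* For write-through caching or O_SYNC/O_DSYNC writes, flush and wait
	 * for any overlapping dirty data first, then set up a write-through
	 * request that modified folios will be fed to as we go.
	 */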
	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
	    ) {
		wbc_attach_fdatawrite_inode(&wbc, mapping->host);

		ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
		if (ret < 0) {
			wbc_detach_inode(&wbc);
			goto out;
		}

		wreq = netfs_begin_writethrough(iocb, iter->count);
		if (IS_ERR(wreq)) {
			wbc_detach_inode(&wbc);
			ret = PTR_ERR(wreq);
			wreq = NULL;
			goto out;
		}
		if (!is_sync_kiocb(iocb))
			wreq->iocb = iocb;
		netfs_stat(&netfs_n_wh_writethrough);
	} else {
		netfs_stat(&netfs_n_wh_buffered_write);
	}

	do {
		struct netfs_folio *finfo;
		struct netfs_group *group;
		unsigned long long fpos;
		size_t flen;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */

		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));

		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 *
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 *
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		 */
		ret = -EFAULT;
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
			break;

		folio = netfs_grab_folio_for_write(mapping, pos, part);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

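		/* Recompute the span to write against the folio we actually
		 * got, which may be larger or smaller than requested.
		 */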
		flen = folio_size(folio);
		fpos = folio_pos(folio);
		offset = pos - fpos;
		part = min_t(size_t, flen - offset, part);

		/* Wait for writeback to complete.  The writeback engine owns
		 * the info in folio->private and may change it until it
		 * removes the WB mark.
		 */
		if (folio_get_private(folio) &&
		    folio_wait_writeback_killable(folio)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		/* Decide how we should modify a folio.  We might be attempting
		 * to do write-streaming, in which case we don't want to do a
		 * local RMW cycle if we can avoid it.  If we're doing local
		 * caching or content crypto, we award that priority over
		 * avoiding RMW.  If the file is open readably, then we also
		 * assume that we may want to read what we wrote.
		 */
		finfo = netfs_folio_info(folio);
		group = netfs_folio_group(folio);

		if (unlikely(group != netfs_group) &&
		    group != NETFS_FOLIO_COPY_TO_CACHE)
			goto flush_content;

		if (folio_test_uptodate(folio)) {
			if (mapping_writably_mapped(mapping))
				flush_dcache_folio(folio);
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			netfs_set_group(folio, netfs_group);
			trace_netfs_folio(folio, netfs_folio_is_uptodate);
			goto copied;
		}

		/* If the page is above the zero-point then we assume that the
		 * server would just return a block of zeros or a short read if
		 * we try to read it.
		 */
		if (fpos >= ctx->zero_point) {
			folio_zero_segment(folio, 0, offset);
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			folio_zero_segment(folio, offset + copied, flen);
			__netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			trace_netfs_folio(folio, netfs_modify_and_clear);
			goto copied;
		}

		/* See if we can write a whole folio in one go. */
		if (!maybe_trouble && offset == 0 && part >= flen) {
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
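			/* A short copy would leave part of the folio holding
			 * stale data, so back out and let the paths below
			 * handle it when we come round again.
			 */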
			if (unlikely(copied < part)) {
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
				copied = 0;
				folio_unlock(folio);
				goto retry;
			}
			__netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			trace_netfs_folio(folio, netfs_whole_folio_modify);
			goto copied;
		}

		/* We don't want to do a streaming write on a file that loses
		 * caching service temporarily because the backing store got
		 * culled and we don't really want to get a streaming write on
		 * a file that's open for reading as ->read_folio() then has to
		 * be able to flush it.
		 */
		if ((file->f_mode & FMODE_READ) ||
		    netfs_is_cache_enabled(ctx)) {
			if (finfo) {
				netfs_stat(&netfs_n_wh_wstream_conflict);
				goto flush_content;
			}
			ret = netfs_prefetch_for_write(file, folio, offset, part);
			if (ret < 0) {
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
			}
			/* Note that copy-to-cache may have been set. */

			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			netfs_set_group(folio, netfs_group);
			trace_netfs_folio(folio, netfs_just_prefetch);
			goto copied;
		}

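		/* Otherwise do a streaming write: copy the data in and record
		 * just the modified span in a netfs_folio rather than reading
		 * the rest of the folio in first.
		 */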
		if (!finfo) {
			ret = -EIO;
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			if (offset == 0 && copied == flen) {
				__netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace_netfs_folio(folio, netfs_streaming_filled_page);
				goto copied;
			}

			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
			if (!finfo) {
				iov_iter_revert(iter, copied);
				ret = -ENOMEM;
				goto error_folio_unlock;
			}
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
			folio_attach_private(folio, (void *)((unsigned long)finfo |
							     NETFS_FOLIO_INFO));
			trace_netfs_folio(folio, netfs_streaming_write);
			goto copied;
		}

		/* We can continue a streaming write only if it continues on
		 * from the previous.  If it overlaps, we must flush lest we
		 * suffer a partial copy and disjoint dirty regions.
		 */
		if (offset == finfo->dirty_offset + finfo->dirty_len) {
			copied = copy_folio_from_iter_atomic(folio, offset, part, iter);
			if (unlikely(copied == 0))
				goto copy_failed;
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
				else
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				kfree(finfo);
				trace_netfs_folio(folio, netfs_streaming_cont_filled_page);
			} else {
				trace_netfs_folio(folio, netfs_streaming_write_cont);
			}
			goto copied;
		}

		/* Incompatible write; flush the folio and try again. */
	flush_content:
		trace_netfs_folio(folio, netfs_flush_content);
		folio_unlock(folio);
		folio_put(folio);
		ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
		if (ret < 0)
			goto error_folio_unlock;
		continue;

	copied:
		flush_dcache_folio(folio);

		/* Update the inode size if we moved the EOF marker */
		pos += copied;
		i_size = i_size_read(inode);
		if (pos > i_size)
			netfs_update_i_size(ctx, inode, i_size, pos, copied);
		written += copied;

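		/* In buffered mode, just dirty the folio; in write-through
		 * mode, add it to the request, which takes care of unlocking.
		 */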
		if (likely(!wreq)) {
			folio_mark_dirty(folio);
			folio_unlock(folio);
		} else {
			netfs_advance_writethrough(wreq, &wbc, folio, copied,
						   offset + copied == flen,
						   &writethrough);
			/* Folio unlocked */
		}
	retry:
		folio_put(folio);
		folio = NULL;

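		/* Throttle against the dirty-page limits; with IOCB_NOWAIT
		 * (BDP_ASYNC) this may return an error instead of sleeping.
		 */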
		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
			break;

		cond_resched();
	} while (iov_iter_count(iter));

out:
	if (likely(written)) {
		/* Set indication that ctime and mtime got updated in case
		 * close is deferred.
		 */
		set_bit(NETFS_ICTX_MODIFIED_ATTR, &ctx->flags);
		if (unlikely(ctx->ops->post_modify))
			ctx->ops->post_modify(inode);
	}

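	/* Wind up any write-through request.  An asynchronous iocb completes
	 * through the request and -EIOCBQUEUED is returned here; otherwise
	 * fold the write-through result into ret.
	 */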
	if (unlikely(wreq)) {
		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
		wbc_detach_inode(&wbc);
		if (ret2 == -EIOCBQUEUED)
			return ret2;
		if (ret == 0)
			ret = ret2;
	}

	iocb->ki_pos += written;
	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;

copy_failed:
	ret = -EFAULT;
error_folio_unlock:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_perform_write);

/**
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb: IO state structure (file, offset, etc.)
 * @from: iov_iter with data to write
 * @netfs_group: Grouping for dirty folios (eg. ceph snaps).
 *
 * This function does all the work needed for actually writing data to a
 * file.  It removes SUID from the file, updates modification times and then
 * performs a buffered write by calling netfs_perform_write().
 *
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already.  The caller is also responsible for doing
 * any necessary syncing afterwards.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it.  This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	trace_netfs_write_iter(iocb, from);

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	return netfs_perform_write(iocb, from, netfs_group);
}
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);

/**
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 *
 * Return:
 * * Negative error code if no data has been written at all or if
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

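	/* O_DIRECT writes and inodes marked for unbuffered I/O bypass the
	 * pagecache entirely.
	 */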
	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);

	ret = netfs_start_io_write(inode);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);

/*
 * Notification that a previously read-only page is about to become writable.
 * The caller indicates the precise page that needs to be written to, but
 * we only track group on a per-folio basis, so we block more often than
 * we might otherwise.
 */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
	struct netfs_group *group;
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file);
	struct netfs_inode *ictx = netfs_inode(inode);
	vm_fault_t ret = VM_FAULT_NOPAGE;
	int err;

	_enter("%lx", folio->index);

	sb_start_pagefault(inode->i_sb);

	if (folio_lock_killable(folio) < 0)
		goto out;
	if (folio->mapping != mapping)
		goto unlock;
	if (folio_wait_writeback_killable(folio) < 0)
		goto unlock;

	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS;
		goto unlock;
	}

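	/* If the folio belongs to a different flush group, it must be written
	 * back before this fault can dirty it for the new group; start
	 * writeback and ask the fault handler to retry.
	 */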
	group = netfs_folio_group(folio);
	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
		folio_unlock(folio);
		err = filemap_fdatawrite_range(mapping,
					       folio_pos(folio),
					       folio_pos(folio) + folio_size(folio));
		switch (err) {
		case 0:
			ret = VM_FAULT_RETRY;
			goto out;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			goto out;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
	else
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	set_bit(NETFS_ICTX_MODIFIED_ATTR, &ictx->flags);
	if (ictx->ops->post_modify)
		ictx->ops->post_modify(inode);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
unlock:
	folio_unlock(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_page_mkwrite);