mirror of https://github.com/torvalds/linux.git
netfs: Revert "netfs: Switch debug logging to pr_debug()"
Revert commit 163eae0fb0 ("netfs: Switch debug logging to pr_debug()") to get back the original operation of the debugging macros.
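The practical effect of the revert is easiest to see in the fs/netfs/internal.h hunk below. As a rough sketch (simplified here, not the verbatim header): kenter()/kleave()/kdebug() print unconditionally via printk(), while the _enter()/_leave()/_debug() wrappers are always-on when built with __KDEBUG, gated at runtime on the netfs_debug module parameter when CONFIG_NETFS_DEBUG=y, and compiled out through no_printk() otherwise:

	/* Simplified sketch of the restored scheme; only _debug() is shown,
	 * _enter()/_leave() follow the same pattern (see internal.h below).
	 */
	#define dbgprintk(FMT, ...) \
		printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

	#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

	#ifdef __KDEBUG
	#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)	/* always on */
	#elif defined(CONFIG_NETFS_DEBUG)
	#define _debug(FMT, ...)			\
	do {						\
		if (netfs_debug)			\
			kdebug(FMT, ##__VA_ARGS__);	\
	} while (0)
	#else
	#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)	/* compiled out */
	#endif

With CONFIG_NETFS_DEBUG=y the output can then be toggled at runtime through the module parameter re-added in fs/netfs/main.c, e.g. "echo 1 >/sys/module/netfs/parameters/debug".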
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20240608151352.22860-2-ukleinek@kernel.org
Link: https://lore.kernel.org/r/1410685.1721333252@warthog.procyon.org.uk
cc: Uwe Kleine-König <ukleinek@kernel.org>
cc: Christian Brauner <brauner@kernel.org>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
This commit is contained in: commit a9d47a50cf (parent 933069701c)
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -117,7 +117,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
 			if (folio->index == rreq->no_unlock_folio &&
 			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
-				kdebug("no unlock");
+				_debug("no unlock");
 			else
 				folio_unlock(folio);
 		}
@@ -204,7 +204,7 @@ void netfs_readahead(struct readahead_control *ractl)
 	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
 	int ret;

-	kenter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));

 	if (readahead_count(ractl) == 0)
 		return;
@@ -268,7 +268,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
 	struct folio *sink = NULL;
 	int ret;

-	kenter("%lx", folio->index);
+	_enter("%lx", folio->index);

 	rreq = netfs_alloc_request(mapping, file,
 				   folio_pos(folio), folio_size(folio),
@@ -508,7 +508,7 @@ retry:

 have_folio:
 	*_folio = folio;
-	kleave(" = 0");
+	_leave(" = 0");
 	return 0;

 error_put:
@@ -518,7 +518,7 @@ error:
 		folio_unlock(folio);
 		folio_put(folio);
 	}
-	kleave(" = %d", ret);
+	_leave(" = %d", ret);
 	return ret;
 }
 EXPORT_SYMBOL(netfs_write_begin);
@@ -536,7 +536,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 	size_t flen = folio_size(folio);
 	int ret;

-	kenter("%zx @%llx", flen, start);
+	_enter("%zx @%llx", flen, start);

 	ret = -ENOMEM;

@@ -567,7 +567,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 error_put:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
 error:
-	kleave(" = %d", ret);
+	_leave(" = %d", ret);
 	return ret;
 }

--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -56,7 +56,7 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
 	struct netfs_group *group = netfs_folio_group(folio);
 	loff_t pos = folio_pos(folio);

-	kenter("");
+	_enter("");

 	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
 		return NETFS_FLUSH_CONTENT;
@@ -272,12 +272,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		 */
 		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
 					    flen, offset, part, maybe_trouble);
-		kdebug("howto %u", howto);
+		_debug("howto %u", howto);
 		switch (howto) {
 		case NETFS_JUST_PREFETCH:
 			ret = netfs_prefetch_for_write(file, folio, offset, part);
 			if (ret < 0) {
-				kdebug("prefetch = %zd", ret);
+				_debug("prefetch = %zd", ret);
 				goto error_folio_unlock;
 			}
 			break;
@@ -418,7 +418,7 @@ out:
 	}

 	iocb->ki_pos += written;
-	kleave(" = %zd [%zd]", written, ret);
+	_leave(" = %zd [%zd]", written, ret);
 	return written ? written : ret;

 error_folio_unlock:
@@ -491,7 +491,7 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct netfs_inode *ictx = netfs_inode(inode);
 	ssize_t ret;

-	kenter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

 	if (!iov_iter_count(from))
 		return 0;
@@ -529,7 +529,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 	vm_fault_t ret = VM_FAULT_RETRY;
 	int err;

-	kenter("%lx", folio->index);
+	_enter("%lx", folio->index);

 	sb_start_pagefault(inode->i_sb);

--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -33,7 +33,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
 	size_t orig_count = iov_iter_count(iter);
 	bool async = !is_sync_kiocb(iocb);

-	kenter("");
+	_enter("");

 	if (!orig_count)
 		return 0; /* Don't update atime */
--- a/fs/netfs/direct_write.c
+++ b/fs/netfs/direct_write.c
@@ -37,7 +37,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 	size_t len = iov_iter_count(iter);
 	bool async = !is_sync_kiocb(iocb);

-	kenter("");
+	_enter("");

 	/* We're going to need a bounce buffer if what we transmit is going to
 	 * be different in some way to the source buffer, e.g. because it gets
@@ -45,7 +45,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 	 */
 	// TODO

-	kdebug("uw %llx-%llx", start, end);
+	_debug("uw %llx-%llx", start, end);

 	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
 				      iocb->ki_flags & IOCB_DIRECT ?
@@ -96,7 +96,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 		wreq->cleanup = netfs_cleanup_dio_write;
 		ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
 		if (ret < 0) {
-			kdebug("begin = %zd", ret);
+			_debug("begin = %zd", ret);
 			goto out;
 		}

@@ -143,7 +143,7 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	loff_t pos = iocb->ki_pos;
 	unsigned long long end = pos + iov_iter_count(from) - 1;

-	kenter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
+	_enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));

 	if (!iov_iter_count(from))
 		return 0;
--- a/fs/netfs/fscache_cache.c
+++ b/fs/netfs/fscache_cache.c
@@ -237,7 +237,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 {
 	int n_accesses;

-	kenter("{%s,%s}", ops->name, cache->name);
+	_enter("{%s,%s}", ops->name, cache->name);

 	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);

@@ -257,7 +257,7 @@ int fscache_add_cache(struct fscache_cache *cache,

 	up_write(&fscache_addremove_sem);
 	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
-	kleave(" = 0 [%s]", cache->name);
+	_leave(" = 0 [%s]", cache->name);
 	return 0;
 }
 EXPORT_SYMBOL(fscache_add_cache);
--- a/fs/netfs/fscache_cookie.c
+++ b/fs/netfs/fscache_cookie.c
@@ -456,7 +456,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
 {
 	struct fscache_cookie *cookie;

-	kenter("V=%x", volume->debug_id);
+	_enter("V=%x", volume->debug_id);

 	if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
 		return NULL;
@@ -484,7 +484,7 @@ struct fscache_cookie *__fscache_acquire_cookie(

 	trace_fscache_acquire(cookie);
 	fscache_stat(&fscache_n_acquires_ok);
-	kleave(" = c=%08x", cookie->debug_id);
+	_leave(" = c=%08x", cookie->debug_id);
 	return cookie;
 }
 EXPORT_SYMBOL(__fscache_acquire_cookie);
@@ -505,7 +505,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
 	enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
 	bool need_withdraw = false;

-	kenter("");
+	_enter("");

 	if (!cookie->volume->cache_priv) {
 		fscache_create_volume(cookie->volume, true);
@@ -519,7 +519,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
 		if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
 			fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
 		need_withdraw = true;
-		kleave(" [fail]");
+		_leave(" [fail]");
 		goto out;
 	}

@@ -572,7 +572,7 @@ void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
 	bool queue = false;
 	int n_active;

-	kenter("c=%08x", cookie->debug_id);
+	_enter("c=%08x", cookie->debug_id);

 	if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
 		 "Trying to use relinquished cookie\n"))
@@ -636,7 +636,7 @@ again:
 	spin_unlock(&cookie->lock);
 	if (queue)
 		fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
-	kleave("");
+	_leave("");
 }
 EXPORT_SYMBOL(__fscache_use_cookie);

@@ -702,7 +702,7 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
 	enum fscache_cookie_state state;
 	bool wake = false;

-	kenter("c=%x", cookie->debug_id);
+	_enter("c=%x", cookie->debug_id);

 again:
 	spin_lock(&cookie->lock);
@@ -820,7 +820,7 @@ out:
 	spin_unlock(&cookie->lock);
 	if (wake)
 		wake_up_cookie_state(cookie);
-	kleave("");
+	_leave("");
 }

 static void fscache_cookie_worker(struct work_struct *work)
@@ -867,7 +867,7 @@ static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
 		set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
 		spin_unlock(&cookie->lock);
 		fscache_stat(&fscache_n_cookies_lru_expired);
-		kdebug("lru c=%x", cookie->debug_id);
+		_debug("lru c=%x", cookie->debug_id);
 		__fscache_withdraw_cookie(cookie);
 	}

@@ -971,7 +971,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 	if (retire)
 		fscache_stat(&fscache_n_relinquishes_retire);

-	kenter("c=%08x{%d},%d",
+	_enter("c=%08x{%d},%d",
 	       cookie->debug_id, atomic_read(&cookie->n_active), retire);

 	if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
@@ -1050,7 +1050,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 {
 	bool is_caching;

-	kenter("c=%x", cookie->debug_id);
+	_enter("c=%x", cookie->debug_id);

 	fscache_stat(&fscache_n_invalidates);

@@ -1072,7 +1072,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 	case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
 	default:
 		spin_unlock(&cookie->lock);
-		kleave(" [no %u]", cookie->state);
+		_leave(" [no %u]", cookie->state);
 		return;

 	case FSCACHE_COOKIE_STATE_LOOKING_UP:
@@ -1081,7 +1081,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 		fallthrough;
 	case FSCACHE_COOKIE_STATE_CREATING:
 		spin_unlock(&cookie->lock);
-		kleave(" [look %x]", cookie->inval_counter);
+		_leave(" [look %x]", cookie->inval_counter);
 		return;

 	case FSCACHE_COOKIE_STATE_ACTIVE:
@@ -1094,7 +1094,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,

 		if (is_caching)
 			fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
-		kleave(" [inv]");
+		_leave(" [inv]");
 		return;
 	}
 }
--- a/fs/netfs/fscache_io.c
+++ b/fs/netfs/fscache_io.c
@@ -28,12 +28,12 @@ bool fscache_wait_for_operation(struct netfs_cache_resources *cres,

again:
 	if (!fscache_cache_is_live(cookie->volume->cache)) {
-		kleave(" [broken]");
+		_leave(" [broken]");
 		return false;
 	}

 	state = fscache_cookie_state(cookie);
-	kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

 	switch (state) {
 	case FSCACHE_COOKIE_STATE_CREATING:
@@ -52,7 +52,7 @@ again:
 	case FSCACHE_COOKIE_STATE_DROPPED:
 	case FSCACHE_COOKIE_STATE_RELINQUISHING:
 	default:
-		kleave(" [not live]");
+		_leave(" [not live]");
 		return false;
 	}

@@ -92,7 +92,7 @@ again:
 	spin_lock(&cookie->lock);

 	state = fscache_cookie_state(cookie);
-	kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);

 	switch (state) {
 	case FSCACHE_COOKIE_STATE_LOOKING_UP:
@@ -140,7 +140,7 @@ failed:
 	cres->cache_priv = NULL;
 	cres->ops = NULL;
 	fscache_end_cookie_access(cookie, fscache_access_io_not_live);
-	kleave(" = -ENOBUFS");
+	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 }

@@ -224,7 +224,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
 	if (len == 0)
 		goto abandon;

-	kenter("%llx,%zx", start, len);
+	_enter("%llx,%zx", start, len);

 	wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
 	if (!wreq)
--- a/fs/netfs/fscache_main.c
+++ b/fs/netfs/fscache_main.c
@@ -99,7 +99,7 @@ error_wq:
 */
 void __exit fscache_exit(void)
 {
-	kenter("");
+	_enter("");

 	kmem_cache_destroy(fscache_cookie_jar);
 	fscache_proc_cleanup();
--- a/fs/netfs/fscache_volume.c
+++ b/fs/netfs/fscache_volume.c
@@ -264,7 +264,7 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
 	fscache_see_volume(volume, fscache_volume_new_acquire);
 	fscache_stat(&fscache_n_volumes);
 	up_write(&fscache_addremove_sem);
-	kleave(" = v=%x", volume->debug_id);
+	_leave(" = v=%x", volume->debug_id);
 	return volume;

err_vol:
@@ -466,7 +466,7 @@ void fscache_withdraw_volume(struct fscache_volume *volume)
 {
 	int n_accesses;

-	kdebug("withdraw V=%x", volume->debug_id);
+	_debug("withdraw V=%x", volume->debug_id);

 	/* Allow wakeups on dec-to-0 */
 	n_accesses = atomic_dec_return(&volume->n_accesses);
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -34,6 +34,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
 /*
  * main.c
  */
+extern unsigned int netfs_debug;
 extern struct list_head netfs_io_requests;
 extern spinlock_t netfs_proc_lock;
 extern mempool_t netfs_request_pool;
@@ -353,12 +354,42 @@ void fscache_create_volume(struct fscache_volume *volume, bool wait);
 * debug tracing
 */
 #define dbgprintk(FMT, ...) \
-	pr_debug("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

 #define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
 #define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
 #define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

+#ifdef __KDEBUG
+#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
+#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
+#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
+
+#elif defined(CONFIG_NETFS_DEBUG)
+#define _enter(FMT, ...)			\
+do {						\
+	if (netfs_debug)			\
+		kenter(FMT, ##__VA_ARGS__);	\
+} while (0)
+
+#define _leave(FMT, ...)			\
+do {						\
+	if (netfs_debug)			\
+		kleave(FMT, ##__VA_ARGS__);	\
+} while (0)
+
+#define _debug(FMT, ...)			\
+do {						\
+	if (netfs_debug)			\
+		kdebug(FMT, ##__VA_ARGS__);	\
+} while (0)
+
+#else
+#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
+#endif
+
 /*
 * assertions
 */
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -130,7 +130,7 @@ static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
 	if (count == remaining)
 		return;

-	kdebug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+	_debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
 	       rreq->debug_id, subreq->debug_index,
 	       iov_iter_count(&subreq->io_iter), subreq->transferred,
 	       subreq->len, rreq->i_size,
@@ -326,7 +326,7 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
 	struct netfs_io_request *rreq = subreq->rreq;
 	int u;

-	kenter("R=%x[%x]{%llx,%lx},%zd",
+	_enter("R=%x[%x]{%llx,%lx},%zd",
 	       rreq->debug_id, subreq->debug_index,
 	       subreq->start, subreq->flags, transferred_or_error);

@@ -435,7 +435,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
 	struct netfs_inode *ictx = netfs_inode(rreq->inode);
 	size_t lsize;

-	kenter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);

 	if (rreq->origin != NETFS_DIO_READ) {
 		source = netfs_cache_prepare_read(subreq, rreq->i_size);
@@ -518,7 +518,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
 	subreq->start = rreq->start + rreq->submitted;
 	subreq->len = io_iter->count;

-	kdebug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
+	_debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
 	list_add_tail(&subreq->rreq_link, &rreq->subrequests);

 	/* Call out to the cache to find out what it can do with the remaining
@@ -570,7 +570,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 	struct iov_iter io_iter;
 	int ret;

-	kenter("R=%x %llx-%llx",
+	_enter("R=%x %llx-%llx",
 	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);

 	if (rreq->len == 0) {
@@ -593,7 +593,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 	atomic_set(&rreq->nr_outstanding, 1);
 	io_iter = rreq->io_iter;
 	do {
-		kdebug("submit %llx + %llx >= %llx",
+		_debug("submit %llx + %llx >= %llx",
 		       rreq->start, rreq->submitted, rreq->i_size);
 		if (rreq->origin == NETFS_DIO_READ &&
 		    rreq->start + rreq->submitted >= rreq->i_size)
--- a/fs/netfs/main.c
+++ b/fs/netfs/main.c
@@ -20,6 +20,10 @@ MODULE_LICENSE("GPL");

 EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);

+unsigned netfs_debug;
+module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
+
 static struct kmem_cache *netfs_request_slab;
 static struct kmem_cache *netfs_subrequest_slab;
 mempool_t netfs_request_pool;
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -26,7 +26,7 @@ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
 	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
 	bool need_use = false;

-	kenter("");
+	_enter("");

 	if (!filemap_dirty_folio(mapping, folio))
 		return false;
@@ -99,7 +99,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 	struct netfs_folio *finfo;
 	size_t flen = folio_size(folio);

-	kenter("{%lx},%zx,%zx", folio->index, offset, length);
+	_enter("{%lx},%zx,%zx", folio->index, offset, length);

 	if (!folio_test_private(folio))
 		return;
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -161,7 +161,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
 {
 	struct list_head *next;

-	kenter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

 	if (list_empty(&stream->subrequests))
 		return;
@@ -374,7 +374,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 	unsigned int notes;
 	int s;

-	kenter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+	_enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
 	trace_netfs_collect(wreq);
 	trace_netfs_rreq(wreq, netfs_rreq_trace_collect);

@@ -409,7 +409,7 @@ reassess_streams:
 		front = stream->front;
 		while (front) {
 			trace_netfs_collect_sreq(wreq, front);
-			//kdebug("sreq [%x] %llx %zx/%zx",
+			//_debug("sreq [%x] %llx %zx/%zx",
 			//       front->debug_index, front->start, front->transferred, front->len);

 			/* Stall if there may be a discontinuity. */
@@ -598,7 +598,7 @@ reassess_streams:
out:
 	netfs_put_group_many(wreq->group, wreq->nr_group_rel);
 	wreq->nr_group_rel = 0;
-	kleave(" = %x", notes);
+	_leave(" = %x", notes);
 	return;

need_retry:
@@ -606,7 +606,7 @@ need_retry:
 	 * that any partially completed op will have had any wholly transferred
 	 * folios removed from it.
 	 */
-	kdebug("retry");
+	_debug("retry");
 	netfs_retry_writes(wreq);
 	goto out;
 }
@@ -621,7 +621,7 @@ void netfs_write_collection_worker(struct work_struct *work)
 	size_t transferred;
 	int s;

-	kenter("R=%x", wreq->debug_id);
+	_enter("R=%x", wreq->debug_id);

 	netfs_see_request(wreq, netfs_rreq_trace_see_work);
 	if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
@@ -684,7 +684,7 @@ void netfs_write_collection_worker(struct work_struct *work)
 	if (wreq->origin == NETFS_DIO_WRITE)
 		inode_dio_end(wreq->inode);

-	kdebug("finished");
+	_debug("finished");
 	trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
 	wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
@@ -744,7 +744,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
 	struct netfs_io_request *wreq = subreq->rreq;
 	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];

-	kenter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+	_enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);

 	switch (subreq->source) {
 	case NETFS_UPLOAD_TO_SERVER:
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -99,7 +99,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 	if (IS_ERR(wreq))
 		return wreq;

-	kenter("R=%x", wreq->debug_id);
+	_enter("R=%x", wreq->debug_id);

 	ictx = netfs_inode(wreq->inode);
 	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
@@ -159,7 +159,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 	subreq->max_nr_segs = INT_MAX;
 	subreq->stream_nr = stream->stream_nr;

-	kenter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
+	_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);

 	trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
 			     refcount_read(&subreq->ref),
@@ -215,7 +215,7 @@ static void netfs_do_issue_write(struct netfs_io_stream *stream,
 {
 	struct netfs_io_request *wreq = subreq->rreq;

-	kenter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
+	_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);

 	if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
 		return netfs_write_subrequest_terminated(subreq, subreq->error, false);
@@ -272,11 +272,11 @@ int netfs_advance_write(struct netfs_io_request *wreq,
 	size_t part;

 	if (!stream->avail) {
-		kleave("no write");
+		_leave("no write");
 		return len;
 	}

-	kenter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
+	_enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);

 	if (subreq && start != subreq->start + subreq->len) {
 		netfs_issue_write(wreq, stream);
@@ -288,7 +288,7 @@ int netfs_advance_write(struct netfs_io_request *wreq,
 	subreq = stream->construct;

 	part = min(subreq->max_len - subreq->len, len);
-	kdebug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
+	_debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
 	subreq->len += part;
 	subreq->nr_segs++;

@@ -319,7 +319,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	bool to_eof = false, streamw = false;
 	bool debug = false;

-	kenter("");
+	_enter("");

 	/* netfs_perform_write() may shift i_size around the page or from out
 	 * of the page to beyond it, but cannot move i_size into or through the
@@ -329,7 +329,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,

 	if (fpos >= i_size) {
 		/* mmap beyond eof. */
-		kdebug("beyond eof");
+		_debug("beyond eof");
 		folio_start_writeback(folio);
 		folio_unlock(folio);
 		wreq->nr_group_rel += netfs_folio_written_back(folio);
@@ -363,7 +363,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	}
 	flen -= foff;

-	kdebug("folio %zx %zx %zx", foff, flen, fsize);
+	_debug("folio %zx %zx %zx", foff, flen, fsize);

 	/* Deal with discontinuities in the stream of dirty pages. These can
 	 * arise from a number of sources:
@@ -487,7 +487,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	for (int s = 0; s < NR_IO_STREAMS; s++)
 		netfs_issue_write(wreq, &wreq->io_streams[s]);

-	kleave(" = 0");
+	_leave(" = 0");
 	return 0;
 }

@@ -522,7 +522,7 @@ int netfs_writepages(struct address_space *mapping,
 	netfs_stat(&netfs_n_wh_writepages);

 	do {
-		kdebug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
+		_debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);

 		/* It appears we don't have to handle cyclic writeback wrapping. */
 		WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
@@ -546,14 +546,14 @@ int netfs_writepages(struct address_space *mapping,
 	mutex_unlock(&ictx->wb_lock);

 	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
-	kleave(" = %d", error);
+	_leave(" = %d", error);
 	return error;

couldnt_start:
 	netfs_kill_dirty_pages(mapping, wbc, folio);
out:
 	mutex_unlock(&ictx->wb_lock);
-	kleave(" = %d", error);
+	_leave(" = %d", error);
 	return error;
 }
 EXPORT_SYMBOL(netfs_writepages);
@@ -590,7 +590,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
 			       struct folio *folio, size_t copied, bool to_page_end,
 			       struct folio **writethrough_cache)
 {
-	kenter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
+	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
 	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);

 	if (!*writethrough_cache) {
@@ -624,7 +624,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
 	int ret;

-	kenter("R=%x", wreq->debug_id);
+	_enter("R=%x", wreq->debug_id);

 	if (writethrough_cache)
 		netfs_write_folio(wreq, wbc, writethrough_cache);
@@ -657,7 +657,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 	loff_t start = wreq->start;
 	int error = 0;

-	kenter("%zx", len);
+	_enter("%zx", len);

 	if (wreq->origin == NETFS_DIO_WRITE)
 		inode_dio_begin(wreq->inode);
@@ -665,7 +665,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 	while (len) {
 		// TODO: Prepare content encryption

-		kdebug("unbuffered %zx", len);
+		_debug("unbuffered %zx", len);
 		part = netfs_advance_write(wreq, upload, start, len, false);
 		start += part;
 		len -= part;
@@ -684,6 +684,6 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 	if (list_empty(&upload->subrequests))
 		netfs_wake_write_collector(wreq, false);

-	kleave(" = %d", error);
+	_leave(" = %d", error);
 	return error;
 }