netfs: Split retry code out of fs/netfs/write_collect.c
Split write-retry code out of fs/netfs/write_collect.c as it will become more
elaborate when content crypto is introduced.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-8-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
Committed by: Christian Brauner
Parent: d606c36294
Commit: 751e213f9f
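The retry code being moved drives two filesystem hooks that are visible in the diff below: the optional ->retry_request() op on netfs_ops, called once per upload stream before resending, and the stream's ->prepare_write(), called per subrequest so the filesystem can renegotiate limits such as sreq_max_len (the wsize). A rough, hypothetical sketch of what an implementing filesystem might provide is shown here; the myfs_* names and the 64 KiB clamp are invented for illustration, and only the interactions shown in the diff are assumed.

#include <linux/netfs.h>

/* Hypothetical example callbacks for an imaginary "myfs" filesystem. */
static void myfs_retry_request(struct netfs_io_request *wreq,
			       struct netfs_io_stream *stream)
{
	/* Called once per to-be-retried upload stream: a filesystem might
	 * mark the whole stream failed or adjust its connection state
	 * before the failed subrequests are reissued. */
}

static void myfs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_stream *stream =
		&subreq->rreq->io_streams[subreq->stream_nr];

	/* Renegotiate the maximum size of the reissued write; the retry
	 * loop clamps each reissued subrequest to sreq_max_len. */
	stream->sreq_max_len = min_t(size_t, stream->sreq_max_len, 65536);
}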
fs/netfs/Makefile
@@ -15,7 +15,8 @@ netfs-y := \
 	read_retry.o \
 	rolling_buffer.o \
 	write_collect.o \
-	write_issue.o
+	write_issue.o \
+	write_retry.o
 
 netfs-$(CONFIG_NETFS_STATS) += stats.o
fs/netfs/internal.h
@@ -189,6 +189,11 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
 				struct folio *writethrough_cache);
 int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
 
+/*
+ * write_retry.c
+ */
+void netfs_retry_writes(struct netfs_io_request *wreq);
+
 /*
  * Miscellaneous functions.
  */
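With netfs_retry_writes() now declared in fs/netfs/internal.h, the collector in write_collect.c keeps invoking the retry path without seeing its implementation. The following is a minimal, hypothetical sketch of such a call site; the real collector logic is not part of this commit's hunks, and the helper name example_collector_step is invented.

#include "internal.h"

/* Hypothetical, simplified illustration: once every active stream has
 * quiesced, hand any stream flagged need_retry to the code that now
 * lives in write_retry.c. */
static void example_collector_step(struct netfs_io_request *wreq)
{
	bool need_retry = false;
	int s;

	for (s = 0; s < NR_IO_STREAMS; s++)
		need_retry |= wreq->io_streams[s].need_retry;

	if (need_retry)
		netfs_retry_writes(wreq);
}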
fs/netfs/write_collect.c
@@ -151,220 +151,6 @@ done:
	wreq->buffer.first_tail_slot = slot;
}

/*
 * Perform retries on the streams that need it.
 */
static void netfs_retry_write_stream(struct netfs_io_request *wreq,
				     struct netfs_io_stream *stream)
{
	struct list_head *next;

	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

	if (list_empty(&stream->subrequests))
		return;

	if (stream->source == NETFS_UPLOAD_TO_SERVER &&
	    wreq->netfs_ops->retry_request)
		wreq->netfs_ops->retry_request(wreq, stream);

	if (unlikely(stream->failed))
		return;

	/* If there's no renegotiation to do, just resend each failed subreq. */
	if (!stream->prepare_write) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				struct iov_iter source = subreq->io_iter;

				iov_iter_revert(&source, subreq->len - source.count);
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_write(stream, subreq, &source);
			}
		}
		return;
	}

	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the stream and find the next span of contiguous
		 * data that we then rejig (cifs, for example, needs the wsize
		 * renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len = from->len - from->transferred;

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			return;

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		/* Determine the set of buffers we're going to use. Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;
			/* Renegotiate max_len (wsize) */
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			subreq->retry_count++;
			stream->prepare_write(subreq);

			part = min(len, stream->sreq_max_len);
			subreq->len = part;
			subreq->start = start;
			subreq->transferred = 0;
			len -= part;
			start += part;
			if (len && subreq == to &&
			    __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
				boundary = true;

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_write(stream, subreq, &source);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(wreq);
			subreq->source = to->source;
			subreq->start = start;
			subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
			subreq->stream_nr = to->stream_nr;
			subreq->retry_count = 1;

			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len = len;
			stream->sreq_max_segs = INT_MAX;
			switch (stream->source) {
			case NETFS_UPLOAD_TO_SERVER:
				netfs_stat(&netfs_n_wh_upload);
				stream->sreq_max_len = umin(len, wreq->wsize);
				break;
			case NETFS_WRITE_TO_CACHE:
				netfs_stat(&netfs_n_wh_write);
				break;
			default:
				WARN_ON_ONCE(1);
			}

			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_write(stream, subreq, &source);
			if (!len)
				break;

		} while (len);

	} while (!list_is_head(next, &stream->subrequests));
}

/*
 * Perform retries on the streams that need it. If we're doing content
 * encryption and the server copy changed due to a third-party write, we may
 * need to do an RMW cycle and also rewrite the data to the cache.
 */
static void netfs_retry_writes(struct netfs_io_request *wreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (!stream->active)
			continue;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
				    TASK_UNINTERRUPTIBLE);
		}
	}

	// TODO: Enc: Fetch changed partial pages
	// TODO: Enc: Reencrypt content if needed.
	// TODO: Enc: Wind back transferred point.
	// TODO: Enc: Mark cache pages for retry.

	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->need_retry) {
			stream->need_retry = false;
			netfs_retry_write_stream(wreq, stream);
		}
	}
}

/*
 * Collect and assess the results of various write subrequests. We may need to
 * retry some of the results - or even do an RMW cycle for content crypto.
fs/netfs/write_retry.c (new file, 226 lines)
@@ -0,0 +1,226 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem write retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Perform retries on the streams that need it.
 */
static void netfs_retry_write_stream(struct netfs_io_request *wreq,
				     struct netfs_io_stream *stream)
{
	struct list_head *next;

	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);

	if (list_empty(&stream->subrequests))
		return;

	if (stream->source == NETFS_UPLOAD_TO_SERVER &&
	    wreq->netfs_ops->retry_request)
		wreq->netfs_ops->retry_request(wreq, stream);

	if (unlikely(stream->failed))
		return;

	/* If there's no renegotiation to do, just resend each failed subreq. */
	if (!stream->prepare_write) {
		struct netfs_io_subrequest *subreq;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
				break;
			if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
				struct iov_iter source = subreq->io_iter;

				iov_iter_revert(&source, subreq->len - source.count);
				netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
				netfs_reissue_write(stream, subreq, &source);
			}
		}
		return;
	}

	next = stream->subrequests.next;

	do {
		struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
		struct iov_iter source;
		unsigned long long start, len;
		size_t part;
		bool boundary = false;

		/* Go through the stream and find the next span of contiguous
		 * data that we then rejig (cifs, for example, needs the wsize
		 * renegotiating) and reissue.
		 */
		from = list_entry(next, struct netfs_io_subrequest, rreq_link);
		to = from;
		start = from->start + from->transferred;
		len = from->len - from->transferred;

		if (test_bit(NETFS_SREQ_FAILED, &from->flags) ||
		    !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags))
			return;

		list_for_each_continue(next, &stream->subrequests) {
			subreq = list_entry(next, struct netfs_io_subrequest, rreq_link);
			if (subreq->start + subreq->transferred != start + len ||
			    test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) ||
			    !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
				break;
			to = subreq;
			len += to->len;
		}

		/* Determine the set of buffers we're going to use. Each
		 * subreq gets a subset of a single overall contiguous buffer.
		 */
		netfs_reset_iter(from);
		source = from->io_iter;
		source.count = len;

		/* Work through the sublist. */
		subreq = from;
		list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
			if (!len)
				break;
			/* Renegotiate max_len (wsize) */
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
			__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
			subreq->retry_count++;
			stream->prepare_write(subreq);

			part = min(len, stream->sreq_max_len);
			subreq->len = part;
			subreq->start = start;
			subreq->transferred = 0;
			len -= part;
			start += part;
			if (len && subreq == to &&
			    __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags))
				boundary = true;

			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
			netfs_reissue_write(stream, subreq, &source);
			if (subreq == to)
				break;
		}

		/* If we managed to use fewer subreqs, we can discard the
		 * excess; if we used the same number, then we're done.
		 */
		if (!len) {
			if (subreq == to)
				continue;
			list_for_each_entry_safe_from(subreq, tmp,
						      &stream->subrequests, rreq_link) {
				trace_netfs_sreq(subreq, netfs_sreq_trace_discard);
				list_del(&subreq->rreq_link);
				netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done);
				if (subreq == to)
					break;
			}
			continue;
		}

		/* We ran out of subrequests, so we need to allocate some more
		 * and insert them after.
		 */
		do {
			subreq = netfs_alloc_subrequest(wreq);
			subreq->source = to->source;
			subreq->start = start;
			subreq->debug_index = atomic_inc_return(&wreq->subreq_counter);
			subreq->stream_nr = to->stream_nr;
			subreq->retry_count = 1;

			trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
					     refcount_read(&subreq->ref),
					     netfs_sreq_trace_new);
			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);

			list_add(&subreq->rreq_link, &to->rreq_link);
			to = list_next_entry(to, rreq_link);
			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);

			stream->sreq_max_len = len;
			stream->sreq_max_segs = INT_MAX;
			switch (stream->source) {
			case NETFS_UPLOAD_TO_SERVER:
				netfs_stat(&netfs_n_wh_upload);
				stream->sreq_max_len = umin(len, wreq->wsize);
				break;
			case NETFS_WRITE_TO_CACHE:
				netfs_stat(&netfs_n_wh_write);
				break;
			default:
				WARN_ON_ONCE(1);
			}

			stream->prepare_write(subreq);

			part = umin(len, stream->sreq_max_len);
			subreq->len = subreq->transferred + part;
			len -= part;
			start += part;
			if (!len && boundary) {
				__set_bit(NETFS_SREQ_BOUNDARY, &to->flags);
				boundary = false;
			}

			netfs_reissue_write(stream, subreq, &source);
			if (!len)
				break;

		} while (len);

	} while (!list_is_head(next, &stream->subrequests));
}

/*
 * Perform retries on the streams that need it. If we're doing content
 * encryption and the server copy changed due to a third-party write, we may
 * need to do an RMW cycle and also rewrite the data to the cache.
 */
void netfs_retry_writes(struct netfs_io_request *wreq)
{
	struct netfs_io_subrequest *subreq;
	struct netfs_io_stream *stream;
	int s;

	/* Wait for all outstanding I/O to quiesce before performing retries as
	 * we may need to renegotiate the I/O sizes.
	 */
	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (!stream->active)
			continue;

		list_for_each_entry(subreq, &stream->subrequests, rreq_link) {
			wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS,
				    TASK_UNINTERRUPTIBLE);
		}
	}

	// TODO: Enc: Fetch changed partial pages
	// TODO: Enc: Reencrypt content if needed.
	// TODO: Enc: Wind back transferred point.
	// TODO: Enc: Mark cache pages for retry.

	for (s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		if (stream->need_retry) {
			stream->need_retry = false;
			netfs_retry_write_stream(wreq, stream);
		}
	}
}
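To make the reissue arithmetic in netfs_retry_write_stream() concrete, here is an invented worked example; the subrequest sizes and the 32 KiB limit are illustrative only and assume an upload stream whose prepare_write() leaves sreq_max_len at the renegotiated value.

/*
 * Invented example: one stream holds two failed, contiguous subrequests
 * of 48 KiB each (transferred == 0), so the coalescing pass builds one
 * span with start = from->start and len = 98304.
 *
 * If the renegotiated sreq_max_len (and wsize) is 32768:
 *   - the first existing subrequest is reissued with len = 32768;
 *   - the second (== to) is reissued with len = 32768, leaving len = 32768;
 *   - the allocation loop then creates one extra subrequest, inserts it
 *     after 'to' and reissues it with the final 32768 bytes.
 *
 * Had sreq_max_len instead stayed at 49152, both existing subrequests
 * would simply be reissued with their original sizes and no new
 * subrequest would be allocated.
 */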