netfs: Don't use bh spinlock
All the accessing of the subrequest lists is now done in process context,
possibly in a workqueue, but no longer in BH context, so we don't need to
guard against BH interference when taking the netfs_io_request::lock
spinlock.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-11-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit 627cf64527
parent 31fc366aa7
committed by: Christian Brauner
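
The change is mechanical: every spin_lock_bh()/spin_unlock_bh() pair taken
on netfs_io_request::lock becomes a plain spin_lock()/spin_unlock(). As a
minimal illustrative sketch of why that is safe (not part of the patch; the
demo_request type and demo_* helper names below are hypothetical):
spin_lock_bh() is only needed while a lock can also be taken from
bottom-half (softirq) context, so once every user of the lock runs in
process context, plain spin_lock() suffices.

/* Illustrative sketch only - demo_request and the demo_* helpers are
 * hypothetical, not from the patch. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_request {
	spinlock_t lock;		/* guards ->subrequests */
	struct list_head subrequests;
};

/* Needed while the lock may also be taken in BH (softirq) context:
 * disabling bottom halves prevents a softirq from interrupting us on
 * this CPU and deadlocking on the lock we already hold. */
static void demo_add_bh_safe(struct demo_request *rreq, struct list_head *link)
{
	spin_lock_bh(&rreq->lock);
	list_add_tail(link, &rreq->subrequests);
	spin_unlock_bh(&rreq->lock);
}

/* Sufficient once all users run in process context (e.g. a workqueue):
 * plain spin_lock() avoids disabling and re-enabling BHs around the
 * critical section. */
static void demo_add(struct demo_request *rreq, struct list_head *link)
{
	spin_lock(&rreq->lock);
	list_add_tail(link, &rreq->subrequests);
	spin_unlock(&rreq->lock);
}

The diff below is exactly this substitution applied at every site where
netfs takes rreq->lock or wreq->lock.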
@@ -200,12 +200,12 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 		subreq->len = size;
 
 		atomic_inc(&rreq->nr_outstanding);
-		spin_lock_bh(&rreq->lock);
+		spin_lock(&rreq->lock);
 		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
 		subreq->prev_donated = rreq->prev_donated;
 		rreq->prev_donated = 0;
 		trace_netfs_sreq(subreq, netfs_sreq_trace_added);
-		spin_unlock_bh(&rreq->lock);
+		spin_unlock(&rreq->lock);
 
 		source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
 		subreq->source = source;
@@ -68,12 +68,12 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
 		subreq->len = size;
 
 		atomic_inc(&rreq->nr_outstanding);
-		spin_lock_bh(&rreq->lock);
+		spin_lock(&rreq->lock);
 		list_add_tail(&subreq->rreq_link, &rreq->subrequests);
 		subreq->prev_donated = rreq->prev_donated;
 		rreq->prev_donated = 0;
 		trace_netfs_sreq(subreq, netfs_sreq_trace_added);
-		spin_unlock_bh(&rreq->lock);
+		spin_unlock(&rreq->lock);
 
 		netfs_stat(&netfs_n_rh_download);
 		if (rreq->netfs_ops->prepare_read) {
@@ -144,7 +144,7 @@ donation_changed:
 	prev_donated = READ_ONCE(subreq->prev_donated);
 	next_donated = READ_ONCE(subreq->next_donated);
 	if (prev_donated || next_donated) {
-		spin_lock_bh(&rreq->lock);
+		spin_lock(&rreq->lock);
 		prev_donated = subreq->prev_donated;
 		next_donated = subreq->next_donated;
 		subreq->start -= prev_donated;
@@ -157,7 +157,7 @@ donation_changed:
 			next_donated = subreq->next_donated = 0;
 		}
 		trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
-		spin_unlock_bh(&rreq->lock);
+		spin_unlock(&rreq->lock);
 	}
 
 	avail = subreq->transferred;
@@ -186,18 +186,18 @@ donation_changed:
 	} else if (fpos < start) {
 		excess = fend - subreq->start;
 
-		spin_lock_bh(&rreq->lock);
+		spin_lock(&rreq->lock);
 		/* If we complete first on a folio split with the
 		 * preceding subreq, donate to that subreq - otherwise
 		 * we get the responsibility.
 		 */
 		if (subreq->prev_donated != prev_donated) {
-			spin_unlock_bh(&rreq->lock);
+			spin_unlock(&rreq->lock);
 			goto donation_changed;
 		}
 
 		if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
-			spin_unlock_bh(&rreq->lock);
+			spin_unlock(&rreq->lock);
 			pr_err("Can't donate prior to front\n");
 			goto bad;
 		}
@@ -213,7 +213,7 @@ donation_changed:
 
 		if (subreq->consumed >= subreq->len)
 			goto remove_subreq_locked;
-		spin_unlock_bh(&rreq->lock);
+		spin_unlock(&rreq->lock);
 	} else {
 		pr_err("fpos > start\n");
 		goto bad;
@@ -241,11 +241,11 @@ donation_changed:
 	/* Donate the remaining downloaded data to one of the neighbouring
 	 * subrequests. Note that we may race with them doing the same thing.
 	 */
-	spin_lock_bh(&rreq->lock);
+	spin_lock(&rreq->lock);
 
 	if (subreq->prev_donated != prev_donated ||
 	    subreq->next_donated != next_donated) {
-		spin_unlock_bh(&rreq->lock);
+		spin_unlock(&rreq->lock);
 		cond_resched();
 		goto donation_changed;
 	}
@@ -296,11 +296,11 @@ donation_changed:
 	goto remove_subreq_locked;
 
 remove_subreq:
-	spin_lock_bh(&rreq->lock);
+	spin_lock(&rreq->lock);
 remove_subreq_locked:
 	subreq->consumed = subreq->len;
 	list_del(&subreq->rreq_link);
-	spin_unlock_bh(&rreq->lock);
+	spin_unlock(&rreq->lock);
 	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
 	return true;
@@ -142,12 +142,12 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
 			__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
 			subreq->retry_count++;
 
-			spin_lock_bh(&rreq->lock);
+			spin_lock(&rreq->lock);
 			list_add_tail(&subreq->rreq_link, &rreq->subrequests);
 			subreq->prev_donated += rreq->prev_donated;
 			rreq->prev_donated = 0;
 			trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
-			spin_unlock_bh(&rreq->lock);
+			spin_unlock(&rreq->lock);
 
 			BUG_ON(!len);
@@ -217,9 +217,9 @@ abandon:
 		__clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
 		__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
 	}
-	spin_lock_bh(&rreq->lock);
+	spin_lock(&rreq->lock);
 	list_splice_tail_init(&queue, &rreq->subrequests);
-	spin_unlock_bh(&rreq->lock);
+	spin_unlock(&rreq->lock);
 }
 
 /*
@@ -238,14 +238,14 @@ reassess_streams:
 
 		cancel:
 			/* Remove if completely consumed. */
-			spin_lock_bh(&wreq->lock);
+			spin_lock(&wreq->lock);
 
 			remove = front;
 			list_del_init(&front->rreq_link);
 			front = list_first_entry_or_null(&stream->subrequests,
 							 struct netfs_io_subrequest, rreq_link);
 			stream->front = front;
-			spin_unlock_bh(&wreq->lock);
+			spin_unlock(&wreq->lock);
 			netfs_put_subrequest(remove, false,
 					     notes & SAW_FAILURE ?
 					     netfs_sreq_trace_put_cancel :
@@ -203,7 +203,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 	 * the list. The collector only goes nextwards and uses the lock to
 	 * remove entries off of the front.
 	 */
-	spin_lock_bh(&wreq->lock);
+	spin_lock(&wreq->lock);
 	list_add_tail(&subreq->rreq_link, &stream->subrequests);
 	if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
 		stream->front = subreq;
@@ -214,7 +214,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 		}
 	}
 
-	spin_unlock_bh(&wreq->lock);
+	spin_unlock(&wreq->lock);
 
 	stream->construct = subreq;
 }