Merge tag 'vfs-7.1-rc1.integrity' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs integrity updates from Christian Brauner:
 "This adds support for generating and verifying integrity information
  (aka T10 PI) in the file system, instead of the automatic
  below-the-covers support that is currently used.

  The implementation is based on refactoring the existing block layer
  PI code to be reusable for this use case, and then adding relatively
  small wrappers for the file system use case. These are then used in
  iomap to implement the semantics, and wired up in XFS with a small
  amount of glue code.

  Compared to the baseline this does not change write performance, but
  it increases read performance by up to 15% for 4k I/O, with the
  benefit decreasing at larger I/O sizes as even the baseline quickly
  maxes out the device on my older enterprise SSD"

* tag 'vfs-7.1-rc1.integrity' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  xfs: support T10 protection information
  iomap: support T10 protection information
  iomap: support ioends for buffered reads
  iomap: add a bioset pointer to iomap_read_folio_ops
  ntfs3: remove copy and pasted iomap code
  iomap: allow file systems to hook into buffered read bio submission
  iomap: only call into ->submit_read when there is a read_ctx
  iomap: pass the iomap_iter to ->submit_read
  iomap: refactor iomap_bio_read_folio_range
  block: pass a maxlen argument to bio_iov_iter_bounce
  block: add fs_bio_integrity helpers
  block: make max_integrity_io_size public
  block: prepare generation / verification helpers for fs usage
  block: add a bdev_has_integrity_csum helper
  block: factor out a bio_integrity_setup_default helper
  block: factor out a bio_integrity_action helper
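The new calling convention for file-system-owned protection information, condensed from the iomap hunks below into a minimal sketch (only names that appear in this diff are used; the surrounding filesystem code and the sector/len bookkeeping are assumed):

	/* Write side: attach and fill PI right before the bio is submitted. */
	if (iomap->flags & IOMAP_F_INTEGRITY)
		fs_bio_integrity_generate(bio);	/* no-op when no action is needed */
	submit_bio(bio);

	/* Read side: attach an empty PI buffer before submission... */
	if (iomap->flags & IOMAP_F_INTEGRITY)
		fs_bio_integrity_alloc(bio);
	error = submit_bio_wait(bio);
	/* ...then verify and free it once the driver has completed the bio. */
	if (bio_integrity(bio)) {
		if (!error)
			error = fs_bio_integrity_verify(bio, sector, len);
		fs_bio_integrity_free(bio);
	}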
block/Makefile

@@ -26,7 +26,7 @@ bfq-y				:= bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
 obj-$(CONFIG_IOSCHED_BFQ)	+= bfq.o
 
 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o \
-				   bio-integrity-auto.o
+				   bio-integrity-auto.o bio-integrity-fs.o
 obj-$(CONFIG_BLK_DEV_ZONED)	+= blk-zoned.o
 obj-$(CONFIG_BLK_WBT)		+= blk-wbt.o
 obj-$(CONFIG_BLK_DEBUG_FS)	+= blk-mq-debugfs.o
block/bio-integrity-auto.c

@@ -39,7 +39,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 			container_of(work, struct bio_integrity_data, work);
 	struct bio *bio = bid->bio;
 
-	blk_integrity_verify_iter(bio, &bid->saved_bio_iter);
+	bio->bi_status = bio_integrity_verify(bio, &bid->saved_bio_iter);
 	bio_integrity_finish(bid);
 	bio_endio(bio);
 }
@@ -50,11 +50,6 @@ static bool bip_should_check(struct bio_integrity_payload *bip)
 	return bip->bip_flags & BIP_CHECK_FLAGS;
 }
 
-static bool bi_offload_capable(struct blk_integrity *bi)
-{
-	return bi->metadata_size == bi->pi_tuple_size;
-}
-
 /**
  * __bio_integrity_endio - Integrity I/O completion function
  * @bio: Protected bio
@@ -84,83 +79,30 @@ bool __bio_integrity_endio(struct bio *bio)
 /**
  * bio_integrity_prep - Prepare bio for integrity I/O
  * @bio: bio to prepare
+ * @action: preparation action needed (BI_ACT_*)
  *
- * Checks if the bio already has an integrity payload attached. If it does, the
- * payload has been generated by another kernel subsystem, and we just pass it
- * through.
- * Otherwise allocates integrity payload and for writes the integrity metadata
- * will be generated. For reads, the completion handler will verify the
- * metadata.
+ * Allocate the integrity payload. For writes, generate the integrity metadata
+ * and for reads, setup the completion handler to verify the metadata.
+ *
+ * This is used for bios that do not have user integrity payloads attached.
  */
-bool bio_integrity_prep(struct bio *bio)
+void bio_integrity_prep(struct bio *bio, unsigned int action)
 {
-	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 	struct bio_integrity_data *bid;
-	bool set_flags = true;
-	gfp_t gfp = GFP_NOIO;
-
-	if (!bi)
-		return true;
-
-	if (!bio_sectors(bio))
-		return true;
-
-	/* Already protected? */
-	if (bio_integrity(bio))
-		return true;
-
-	switch (bio_op(bio)) {
-	case REQ_OP_READ:
-		if (bi->flags & BLK_INTEGRITY_NOVERIFY) {
-			if (bi_offload_capable(bi))
-				return true;
-			set_flags = false;
-		}
-		break;
-	case REQ_OP_WRITE:
-		/*
-		 * Zero the memory allocated to not leak uninitialized kernel
-		 * memory to disk for non-integrity metadata where nothing else
-		 * initializes the memory.
-		 */
-		if (bi->flags & BLK_INTEGRITY_NOGENERATE) {
-			if (bi_offload_capable(bi))
-				return true;
-			set_flags = false;
-			gfp |= __GFP_ZERO;
-		} else if (bi->metadata_size > bi->pi_tuple_size)
-			gfp |= __GFP_ZERO;
-		break;
-	default:
-		return true;
-	}
-
-	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
-		return true;
 
 	bid = mempool_alloc(&bid_pool, GFP_NOIO);
 	bio_integrity_init(bio, &bid->bip, &bid->bvec, 1);
 	bid->bio = bio;
 	bid->bip.bip_flags |= BIP_BLOCK_INTEGRITY;
-	bio_integrity_alloc_buf(bio, gfp & __GFP_ZERO);
-
-	bip_set_seed(&bid->bip, bio->bi_iter.bi_sector);
-
-	if (set_flags) {
-		if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
-			bid->bip.bip_flags |= BIP_IP_CHECKSUM;
-		if (bi->csum_type)
-			bid->bip.bip_flags |= BIP_CHECK_GUARD;
-		if (bi->flags & BLK_INTEGRITY_REF_TAG)
-			bid->bip.bip_flags |= BIP_CHECK_REFTAG;
-	}
+	bio_integrity_alloc_buf(bio, action & BI_ACT_ZERO);
+	if (action & BI_ACT_CHECK)
+		bio_integrity_setup_default(bio);
 
 	/* Auto-generate integrity metadata if this is a write */
 	if (bio_data_dir(bio) == WRITE && bip_should_check(&bid->bip))
-		blk_integrity_generate(bio);
+		bio_integrity_generate(bio);
 	else
 		bid->saved_bio_iter = bio->bi_iter;
-	return true;
 }
 EXPORT_SYMBOL(bio_integrity_prep);
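Note the contract change: bio_integrity_prep() no longer returns a bool and no longer decides by itself whether any work is needed; that decision moved into bio_integrity_action(). A minimal sketch of the new caller pattern, as applied to blk_mq_submit_bio() and btt_submit_bio() further down in this diff:

	unsigned int integrity_action;

	integrity_action = bio_integrity_action(bio);
	if (integrity_action)
		bio_integrity_prep(bio, integrity_action);
	/* a zero action mask means: submit without block layer PI handling */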
block/bio-integrity-fs.c (new file)

@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Christoph Hellwig.
+ */
+#include <linux/blk-integrity.h>
+#include <linux/bio-integrity.h>
+#include "blk.h"
+
+struct fs_bio_integrity_buf {
+	struct bio_integrity_payload bip;
+	struct bio_vec bvec;
+};
+
+static struct kmem_cache *fs_bio_integrity_cache;
+static mempool_t fs_bio_integrity_pool;
+
+unsigned int fs_bio_integrity_alloc(struct bio *bio)
+{
+	struct fs_bio_integrity_buf *iib;
+	unsigned int action;
+
+	action = bio_integrity_action(bio);
+	if (!action)
+		return 0;
+
+	iib = mempool_alloc(&fs_bio_integrity_pool, GFP_NOIO);
+	bio_integrity_init(bio, &iib->bip, &iib->bvec, 1);
+
+	bio_integrity_alloc_buf(bio, action & BI_ACT_ZERO);
+	if (action & BI_ACT_CHECK)
+		bio_integrity_setup_default(bio);
+	return action;
+}
+
+void fs_bio_integrity_free(struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+
+	bio_integrity_free_buf(bip);
+	mempool_free(container_of(bip, struct fs_bio_integrity_buf, bip),
+			&fs_bio_integrity_pool);
+
+	bio->bi_integrity = NULL;
+	bio->bi_opf &= ~REQ_INTEGRITY;
+}
+
+void fs_bio_integrity_generate(struct bio *bio)
+{
+	if (fs_bio_integrity_alloc(bio))
+		bio_integrity_generate(bio);
+}
+EXPORT_SYMBOL_GPL(fs_bio_integrity_generate);
+
+int fs_bio_integrity_verify(struct bio *bio, sector_t sector, unsigned int size)
+{
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+
+	/*
+	 * Reinitialize bip->bip_iter.
+	 *
+	 * This is for use in the submitter after the driver is done with the
+	 * bio. Requires the submitter to remember the sector and the size.
+	 */
+	memset(&bip->bip_iter, 0, sizeof(bip->bip_iter));
+	bip->bip_iter.bi_sector = sector;
+	bip->bip_iter.bi_size = bio_integrity_bytes(bi, size >> SECTOR_SHIFT);
+	return blk_status_to_errno(bio_integrity_verify(bio, &bip->bip_iter));
+}
+
+static int __init fs_bio_integrity_init(void)
+{
+	fs_bio_integrity_cache = kmem_cache_create("fs_bio_integrity",
+			sizeof(struct fs_bio_integrity_buf), 0,
+			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	if (mempool_init_slab_pool(&fs_bio_integrity_pool, BIO_POOL_SIZE,
+			fs_bio_integrity_cache))
+		panic("fs_bio_integrity: can't create pool\n");
+	return 0;
+}
+fs_initcall(fs_bio_integrity_init);
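One subtlety in the new file: by the time a read bio completes, the driver has consumed bip->bip_iter, so the payload can no longer be walked for verification as-is. That is why fs_bio_integrity_verify() takes the sector and size explicitly and rebuilds the iterator. A condensed sketch of the full read lifecycle with these helpers (the local variables are illustrative, not from this diff):

	sector_t sector = bio->bi_iter.bi_sector;	/* record before submission */
	unsigned int size = bio->bi_iter.bi_size;
	int error;

	fs_bio_integrity_alloc(bio);		/* attach an empty PI buffer */
	error = submit_bio_wait(bio);		/* driver fills PI, advances bip_iter */
	if (!error)
		error = fs_bio_integrity_verify(bio, sector, size);
	fs_bio_integrity_free(bio);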
block/bio-integrity.c

@@ -7,6 +7,7 @@
  */
 
 #include <linux/blk-integrity.h>
+#include <linux/t10-pi.h>
 #include "blk.h"
 
 struct bio_integrity_alloc {
@@ -16,6 +17,53 @@ struct bio_integrity_alloc {
 
 static mempool_t integrity_buf_pool;
 
+static bool bi_offload_capable(struct blk_integrity *bi)
+{
+	return bi->metadata_size == bi->pi_tuple_size;
+}
+
+unsigned int __bio_integrity_action(struct bio *bio)
+{
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+
+	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
+		return 0;
+
+	switch (bio_op(bio)) {
+	case REQ_OP_READ:
+		if (bi->flags & BLK_INTEGRITY_NOVERIFY) {
+			if (bi_offload_capable(bi))
+				return 0;
+			return BI_ACT_BUFFER;
+		}
+		return BI_ACT_BUFFER | BI_ACT_CHECK;
+	case REQ_OP_WRITE:
+		/*
+		 * Flush masquerading as write?
+		 */
+		if (!bio_sectors(bio))
+			return 0;
+
+		/*
+		 * Zero the memory allocated to not leak uninitialized kernel
+		 * memory to disk for non-integrity metadata where nothing else
+		 * initializes the memory.
+		 */
+		if (bi->flags & BLK_INTEGRITY_NOGENERATE) {
+			if (bi_offload_capable(bi))
+				return 0;
+			return BI_ACT_BUFFER | BI_ACT_ZERO;
+		}
+
+		if (bi->metadata_size > bi->pi_tuple_size)
+			return BI_ACT_BUFFER | BI_ACT_CHECK | BI_ACT_ZERO;
+		return BI_ACT_BUFFER | BI_ACT_CHECK;
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL_GPL(__bio_integrity_action);
+
 void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer)
 {
 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
@@ -53,6 +101,22 @@ void bio_integrity_free_buf(struct bio_integrity_payload *bip)
 	kfree(bvec_virt(bv));
 }
 
+void bio_integrity_setup_default(struct bio *bio)
+{
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+
+	bip_set_seed(bip, bio->bi_iter.bi_sector);
+
+	if (bi->csum_type) {
+		bip->bip_flags |= BIP_CHECK_GUARD;
+		if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
+			bip->bip_flags |= BIP_IP_CHECKSUM;
+	}
+	if (bi->flags & BLK_INTEGRITY_REF_TAG)
+		bip->bip_flags |= BIP_CHECK_REFTAG;
+}
+
 /**
  * bio_integrity_free - Free bio integrity payload
  * @bio: bio containing bip to be freed
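For reference, the action masks that __bio_integrity_action() produces for a few common configurations (an illustrative summary of the switch above; the device parameters are assumed for the example):

	/*
	 * metadata_size == pi_tuple_size (e.g. plain 8-byte T10 PI, offload capable):
	 *   READ                             -> BI_ACT_BUFFER | BI_ACT_CHECK
	 *   READ,  BLK_INTEGRITY_NOVERIFY    -> 0 (controller owns or strips PI)
	 *   WRITE, BLK_INTEGRITY_NOGENERATE  -> 0
	 *   WRITE                            -> BI_ACT_BUFFER | BI_ACT_CHECK
	 *   zero-length WRITE (flush)        -> 0
	 *
	 * metadata_size > pi_tuple_size (extra opaque metadata per interval):
	 *   READ,  BLK_INTEGRITY_NOVERIFY    -> BI_ACT_BUFFER (carry, don't verify)
	 *   WRITE, BLK_INTEGRITY_NOGENERATE  -> BI_ACT_BUFFER | BI_ACT_ZERO
	 *   WRITE                            -> BI_ACT_BUFFER | BI_ACT_CHECK | BI_ACT_ZERO
	 *   (zeroing keeps uninitialized kernel memory out of the non-PI part
	 *    of the metadata buffer)
	 */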
block/bio.c

@@ -1327,9 +1327,10 @@ static void bio_free_folios(struct bio *bio)
 	}
 }
 
-static int bio_iov_iter_bounce_write(struct bio *bio, struct iov_iter *iter)
+static int bio_iov_iter_bounce_write(struct bio *bio, struct iov_iter *iter,
+		size_t maxlen)
 {
-	size_t total_len = iov_iter_count(iter);
+	size_t total_len = min(maxlen, iov_iter_count(iter));
 
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return -EINVAL;
@@ -1367,9 +1368,10 @@ static int bio_iov_iter_bounce_write(struct bio *bio, struct iov_iter *iter)
 	return 0;
 }
 
-static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter)
+static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter,
+		size_t maxlen)
 {
-	size_t len = min(iov_iter_count(iter), SZ_1M);
+	size_t len = min3(iov_iter_count(iter), maxlen, SZ_1M);
 	struct folio *folio;
 
 	folio = folio_alloc_greedy(GFP_KERNEL, &len);
@@ -1408,6 +1410,7 @@ static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter)
  * bio_iov_iter_bounce - bounce buffer data from an iter into a bio
  * @bio: bio to send
  * @iter: iter to read from / write into
+ * @maxlen: maximum size to bounce
  *
  * Helper for direct I/O implementations that need to bounce buffer because
  * we need to checksum the data or perform other operations that require
@@ -1415,11 +1418,11 @@ static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter)
  * copies the data into it. Needs to be paired with bio_iov_iter_unbounce()
  * called on completion.
  */
-int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter)
+int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter, size_t maxlen)
 {
 	if (op_is_write(bio_op(bio)))
-		return bio_iov_iter_bounce_write(bio, iter);
-	return bio_iov_iter_bounce_read(bio, iter);
+		return bio_iov_iter_bounce_write(bio, iter, maxlen);
+	return bio_iov_iter_bounce_read(bio, iter, maxlen);
 }
 
 static void bvec_unpin(struct bio_vec *bv, bool mark_dirty)
block/blk-mq.c

@@ -3143,6 +3143,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	struct blk_plug *plug = current->plug;
 	const int is_sync = op_is_sync(bio->bi_opf);
+	unsigned int integrity_action;
 	struct blk_mq_hw_ctx *hctx;
 	unsigned int nr_segs;
 	struct request *rq;
@@ -3195,8 +3196,9 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio)
 		goto queue_exit;
 
-	if (!bio_integrity_prep(bio))
-		goto queue_exit;
+	integrity_action = bio_integrity_action(bio);
+	if (integrity_action)
+		bio_integrity_prep(bio, integrity_action);
 
 	blk_mq_bio_issue_init(q, bio);
 	if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
block/blk-settings.c

@@ -123,19 +123,6 @@ static int blk_validate_zoned_limits(struct queue_limits *lim)
 	return 0;
 }
 
-/*
- * Maximum size of I/O that needs a block layer integrity buffer.  Limited
- * by the number of intervals for which we can fit the integrity buffer into
- * the buffer size.  Because the buffer is a single segment it is also limited
- * by the maximum segment size.
- */
-static inline unsigned int max_integrity_io_size(struct queue_limits *lim)
-{
-	return min_t(unsigned int, lim->max_segment_size,
-		(BLK_INTEGRITY_MAX_SIZE / lim->integrity.metadata_size) <<
-			lim->integrity.interval_exp);
-}
-
 static int blk_validate_integrity_limits(struct queue_limits *lim)
 {
 	struct blk_integrity *bi = &lim->integrity;
block/blk.h

@@ -699,8 +699,10 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
 		const struct blk_holder_ops *hops, struct file *bdev_file);
 int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);
 
-void blk_integrity_generate(struct bio *bio);
-void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
+void bio_integrity_generate(struct bio *bio);
+blk_status_t bio_integrity_verify(struct bio *bio,
+		struct bvec_iter *saved_iter);
+
 void blk_integrity_prepare(struct request *rq);
 void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);
|
||||
@@ -372,7 +372,7 @@ static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
|
||||
}
|
||||
}
|
||||
|
||||
void blk_integrity_generate(struct bio *bio)
|
||||
void bio_integrity_generate(struct bio *bio)
|
||||
{
|
||||
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
|
||||
struct bio_integrity_payload *bip = bio_integrity(bio);
|
||||
@@ -404,7 +404,7 @@ void blk_integrity_generate(struct bio *bio)
|
||||
}
|
||||
}
|
||||
|
||||
void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter)
|
||||
blk_status_t bio_integrity_verify(struct bio *bio, struct bvec_iter *saved_iter)
|
||||
{
|
||||
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
|
||||
struct bio_integrity_payload *bip = bio_integrity(bio);
|
||||
@@ -439,11 +439,11 @@ void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter)
|
||||
}
|
||||
kunmap_local(kaddr);
|
||||
|
||||
if (ret) {
|
||||
bio->bi_status = ret;
|
||||
return;
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return BLK_STS_OK;
|
||||
}
|
||||
|
||||
void blk_integrity_prepare(struct request *rq)
|
||||
|
||||
drivers/nvdimm/btt.c

@@ -1435,14 +1435,16 @@ static void btt_submit_bio(struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
+	unsigned int integrity_action;
 	struct bvec_iter iter;
 	unsigned long start;
 	struct bio_vec bvec;
 	int err = 0;
 	bool do_acct;
 
-	if (!bio_integrity_prep(bio))
-		return;
+	integrity_action = bio_integrity_action(bio);
+	if (integrity_action)
+		bio_integrity_prep(bio, integrity_action);
 
 	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
 	if (do_acct)
fs/fuse/file.c

@@ -947,7 +947,8 @@ static int fuse_iomap_read_folio_range_async(const struct iomap_iter *iter,
 	return ret;
 }
 
-static void fuse_iomap_read_submit(struct iomap_read_folio_ctx *ctx)
+static void fuse_iomap_submit_read(const struct iomap_iter *iter,
+		struct iomap_read_folio_ctx *ctx)
 {
 	struct fuse_fill_read_data *data = ctx->read_ctx;
 
@@ -958,7 +959,7 @@ static void fuse_iomap_read_submit(struct iomap_read_folio_ctx *ctx)
 
 static const struct iomap_read_ops fuse_iomap_read_ops = {
 	.read_folio_range = fuse_iomap_read_folio_range_async,
-	.submit_read = fuse_iomap_read_submit,
+	.submit_read = fuse_iomap_submit_read,
 };
 
 static int fuse_read_folio(struct file *file, struct folio *folio)
fs/iomap/bio.c

@@ -3,6 +3,7 @@
  * Copyright (C) 2010 Red Hat, Inc.
  * Copyright (C) 2016-2023 Christoph Hellwig.
  */
+#include <linux/bio-integrity.h>
 #include <linux/iomap.h>
 #include <linux/pagemap.h>
 #include "internal.h"
@@ -11,14 +12,19 @@
 static DEFINE_SPINLOCK(failed_read_lock);
 static struct bio_list failed_read_list = BIO_EMPTY_LIST;
 
-static void __iomap_read_end_io(struct bio *bio)
+static u32 __iomap_read_end_io(struct bio *bio, int error)
 {
-	int error = blk_status_to_errno(bio->bi_status);
 	struct folio_iter fi;
+	u32 folio_count = 0;
 
-	bio_for_each_folio_all(fi, bio)
+	bio_for_each_folio_all(fi, bio) {
 		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
+		folio_count++;
+	}
+	if (bio_integrity(bio))
+		fs_bio_integrity_free(bio);
 	bio_put(bio);
+	return folio_count;
 }
@@ -34,7 +40,7 @@ iomap_fail_reads(
 	spin_unlock_irqrestore(&failed_read_lock, flags);
 
 	while ((bio = bio_list_pop(&tmp)) != NULL) {
-		__iomap_read_end_io(bio);
+		__iomap_read_end_io(bio, blk_status_to_errno(bio->bi_status));
 		cond_resched();
 	}
 }
@@ -64,62 +70,88 @@ static void iomap_read_end_io(struct bio *bio)
 		return;
 	}
 
-	__iomap_read_end_io(bio);
+	__iomap_read_end_io(bio, 0);
 }
 
-static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend)
+{
+	return __iomap_read_end_io(&ioend->io_bio, ioend->io_error);
+}
+
+static void iomap_bio_submit_read(const struct iomap_iter *iter,
+		struct iomap_read_folio_ctx *ctx)
 {
 	struct bio *bio = ctx->read_ctx;
 
-	if (bio)
-		submit_bio(bio);
+	if (iter->iomap.flags & IOMAP_F_INTEGRITY)
+		fs_bio_integrity_alloc(bio);
+	submit_bio(bio);
 }
 
-static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+static struct bio_set *iomap_read_bio_set(struct iomap_read_folio_ctx *ctx)
+{
+	if (ctx->ops && ctx->ops->bio_set)
+		return ctx->ops->bio_set;
+	return &fs_bio_set;
+}
+
+static void iomap_read_alloc_bio(const struct iomap_iter *iter,
+		struct iomap_read_folio_ctx *ctx, size_t plen)
+{
+	const struct iomap *iomap = &iter->iomap;
+	unsigned int nr_vecs = DIV_ROUND_UP(iomap_length(iter), PAGE_SIZE);
+	struct bio_set *bio_set = iomap_read_bio_set(ctx);
+	struct folio *folio = ctx->cur_folio;
+	gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
+	gfp_t orig_gfp = gfp;
+	struct bio *bio;
+
+	/* Submit the existing range if there was one. */
+	if (ctx->read_ctx)
+		ctx->ops->submit_read(iter, ctx);
+
+	/* Same as readahead_gfp_mask: */
+	if (ctx->rac)
+		gfp |= __GFP_NORETRY | __GFP_NOWARN;
+
+	/*
+	 * If the bio_alloc fails, try it again for a single page to avoid
+	 * having to deal with partial page reads.  This emulates what
+	 * do_mpage_read_folio does.
+	 */
+	bio = bio_alloc_bioset(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
+			gfp, bio_set);
+	if (!bio)
+		bio = bio_alloc_bioset(iomap->bdev, 1, REQ_OP_READ, orig_gfp,
+				bio_set);
+	if (ctx->rac)
+		bio->bi_opf |= REQ_RAHEAD;
+	bio->bi_iter.bi_sector = iomap_sector(iomap, iter->pos);
+	bio->bi_end_io = iomap_read_end_io;
+	bio_add_folio_nofail(bio, folio, plen,
+			offset_in_folio(folio, iter->pos));
+	ctx->read_ctx = bio;
+	ctx->read_ctx_file_offset = iter->pos;
+}
+
+int iomap_bio_read_folio_range(const struct iomap_iter *iter,
 		struct iomap_read_folio_ctx *ctx, size_t plen)
 {
 	struct folio *folio = ctx->cur_folio;
-	const struct iomap *iomap = &iter->iomap;
-	loff_t pos = iter->pos;
-	size_t poff = offset_in_folio(folio, pos);
-	loff_t length = iomap_length(iter);
-	sector_t sector;
 	struct bio *bio = ctx->read_ctx;
 
-	sector = iomap_sector(iomap, pos);
-	if (!bio || bio_end_sector(bio) != sector ||
-	    !bio_add_folio(bio, folio, plen, poff)) {
-		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
-		gfp_t orig_gfp = gfp;
-		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
-
-		if (bio)
-			submit_bio(bio);
-
-		if (ctx->rac) /* same as readahead_gfp_mask */
-			gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
-				gfp);
-		/*
-		 * If the bio_alloc fails, try it again for a single page to
-		 * avoid having to deal with partial page reads.  This emulates
-		 * what do_mpage_read_folio does.
-		 */
-		if (!bio)
-			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
-		if (ctx->rac)
-			bio->bi_opf |= REQ_RAHEAD;
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = iomap_read_end_io;
-		bio_add_folio_nofail(bio, folio, plen, poff);
-		ctx->read_ctx = bio;
-	}
+	if (!bio ||
+	    bio_end_sector(bio) != iomap_sector(&iter->iomap, iter->pos) ||
+	    bio->bi_iter.bi_size > iomap_max_bio_size(&iter->iomap) - plen ||
+	    !bio_add_folio(bio, folio, plen, offset_in_folio(folio, iter->pos)))
+		iomap_read_alloc_bio(iter, ctx, plen);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(iomap_bio_read_folio_range);
 
 const struct iomap_read_ops iomap_bio_read_ops = {
 	.read_folio_range	= iomap_bio_read_folio_range,
 	.submit_read		= iomap_bio_submit_read,
 };
 EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
@@ -127,11 +159,21 @@ int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
 		struct folio *folio, loff_t pos, size_t len)
 {
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	sector_t sector = iomap_sector(srcmap, pos);
 	struct bio_vec bvec;
 	struct bio bio;
+	int error;
 
 	bio_init(&bio, srcmap->bdev, &bvec, 1, REQ_OP_READ);
-	bio.bi_iter.bi_sector = iomap_sector(srcmap, pos);
+	bio.bi_iter.bi_sector = sector;
 	bio_add_folio_nofail(&bio, folio, len, offset_in_folio(folio, pos));
-	return submit_bio_wait(&bio);
+	if (srcmap->flags & IOMAP_F_INTEGRITY)
+		fs_bio_integrity_alloc(&bio);
+	error = submit_bio_wait(&bio);
+	if (srcmap->flags & IOMAP_F_INTEGRITY) {
+		if (!error)
+			error = fs_bio_integrity_verify(&bio, sector, len);
+		fs_bio_integrity_free(&bio);
+	}
+	return error;
 }
fs/iomap/buffered-io.c

@@ -601,8 +601,8 @@ void iomap_read_folio(const struct iomap_ops *ops,
 		iter.status = iomap_read_folio_iter(&iter, ctx,
 				&bytes_submitted);
 
-	if (ctx->ops->submit_read)
-		ctx->ops->submit_read(ctx);
+	if (ctx->read_ctx && ctx->ops->submit_read)
+		ctx->ops->submit_read(&iter, ctx);
 
 	if (ctx->cur_folio)
 		iomap_read_end(ctx->cur_folio, bytes_submitted);
@@ -668,8 +668,8 @@ void iomap_readahead(const struct iomap_ops *ops,
 		iter.status = iomap_readahead_iter(&iter, ctx,
 				&cur_bytes_submitted);
 
-	if (ctx->ops->submit_read)
-		ctx->ops->submit_read(ctx);
+	if (ctx->read_ctx && ctx->ops->submit_read)
+		ctx->ops->submit_read(&iter, ctx);
 
 	if (ctx->cur_folio)
 		iomap_read_end(ctx->cur_folio, cur_bytes_submitted);
fs/iomap/direct-io.c

@@ -3,6 +3,7 @@
  * Copyright (C) 2010 Red Hat, Inc.
  * Copyright (c) 2016-2025 Christoph Hellwig.
  */
+#include <linux/bio-integrity.h>
 #include <linux/blk-crypto.h>
 #include <linux/fscrypt.h>
 #include <linux/pagemap.h>
@@ -240,6 +241,9 @@ static void __iomap_dio_bio_end_io(struct bio *bio, bool inline_completion)
 {
 	struct iomap_dio *dio = bio->bi_private;
 
+	if (bio_integrity(bio))
+		fs_bio_integrity_free(bio);
+
 	if (dio->flags & IOMAP_DIO_BOUNCE) {
 		bio_iov_iter_unbounce(bio, !!dio->error,
 				dio->flags & IOMAP_DIO_USER_BACKED);
@@ -350,8 +354,10 @@ static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
 	bio->bi_private = dio;
 	bio->bi_end_io = iomap_dio_bio_end_io;
 
 	if (dio->flags & IOMAP_DIO_BOUNCE)
-		ret = bio_iov_iter_bounce(bio, dio->submit.iter);
+		ret = bio_iov_iter_bounce(bio, dio->submit.iter,
+				iomap_max_bio_size(&iter->iomap));
 	else
 		ret = bio_iov_iter_get_pages(bio, dio->submit.iter,
 				alignment - 1);
@@ -368,6 +374,13 @@ static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
 		goto out_put_bio;
 	}
 
+	if (iter->iomap.flags & IOMAP_F_INTEGRITY) {
+		if (dio->flags & IOMAP_DIO_WRITE)
+			fs_bio_integrity_generate(bio);
+		else
+			fs_bio_integrity_alloc(bio);
+	}
+
 	if (dio->flags & IOMAP_DIO_WRITE)
 		task_io_account_write(ret);
 	else if ((dio->flags & IOMAP_DIO_USER_BACKED) &&
fs/iomap/internal.h

@@ -4,6 +4,20 @@
 
 #define IOEND_BATCH_SIZE	4096
 
+/*
+ * Normally we can build bios as big as the data structure supports.
+ *
+ * But for integrity protected I/O we need to respect the maximum size of the
+ * single contiguous allocation for the integrity buffer.
+ */
+static inline size_t iomap_max_bio_size(const struct iomap *iomap)
+{
+	if (iomap->flags & IOMAP_F_INTEGRITY)
+		return max_integrity_io_size(bdev_limits(iomap->bdev));
+	return BIO_MAX_SIZE;
+}
+
+u32 iomap_finish_ioend_buffered_read(struct iomap_ioend *ioend);
 u32 iomap_finish_ioend_direct(struct iomap_ioend *ioend);
 
 #ifdef CONFIG_BLOCK
fs/iomap/ioend.c

@@ -2,6 +2,7 @@
 /*
  * Copyright (c) 2016-2025 Christoph Hellwig.
  */
+#include <linux/bio-integrity.h>
 #include <linux/iomap.h>
 #include <linux/list_sort.h>
 #include <linux/pagemap.h>
@@ -37,7 +38,7 @@ EXPORT_SYMBOL_GPL(iomap_init_ioend);
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
-static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
+static u32 iomap_finish_ioend_buffered_write(struct iomap_ioend *ioend)
 {
 	struct inode *inode = ioend->io_inode;
 	struct bio *bio = &ioend->io_bio;
@@ -65,6 +66,8 @@ static u32 iomap_finish_ioend_buffered(struct iomap_ioend *ioend)
 		folio_count++;
 	}
 
+	if (bio_integrity(bio))
+		fs_bio_integrity_free(bio);
 	bio_put(bio);	/* frees the ioend */
 	return folio_count;
 }
@@ -87,7 +90,7 @@ iomap_fail_ioends(
 	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
 			io_list))) {
 		list_del_init(&ioend->io_list);
-		iomap_finish_ioend_buffered(ioend);
+		iomap_finish_ioend_buffered_write(ioend);
 		cond_resched();
 	}
 }
@@ -120,7 +123,7 @@ static void ioend_writeback_end_bio(struct bio *bio)
 		return;
 	}
 
-	iomap_finish_ioend_buffered(ioend);
+	iomap_finish_ioend_buffered_write(ioend);
 }
 
 /*
@@ -144,6 +147,8 @@ int iomap_ioend_writeback_submit(struct iomap_writepage_ctx *wpc, int error)
 		return error;
 	}
 
+	if (wpc->iomap.flags & IOMAP_F_INTEGRITY)
+		fs_bio_integrity_generate(&ioend->io_bio);
 	submit_bio(&ioend->io_bio);
 	return 0;
 }
@@ -165,10 +170,13 @@ static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
 }
 
 static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos,
-		u16 ioend_flags)
+		unsigned int map_len, u16 ioend_flags)
 {
 	struct iomap_ioend *ioend = wpc->wb_ctx;
 
+	if (ioend->io_bio.bi_iter.bi_size >
+	    iomap_max_bio_size(&wpc->iomap) - map_len)
+		return false;
 	if (ioend_flags & IOMAP_IOEND_BOUNDARY)
 		return false;
 	if ((ioend_flags & IOMAP_IOEND_NOMERGE_FLAGS) !=
@@ -234,7 +242,7 @@ ssize_t iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct folio *folio,
 	if (pos == wpc->iomap.offset && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
 		ioend_flags |= IOMAP_IOEND_BOUNDARY;
 
-	if (!ioend || !iomap_can_add_to_ioend(wpc, pos, ioend_flags)) {
+	if (!ioend || !iomap_can_add_to_ioend(wpc, pos, map_len, ioend_flags)) {
 new_ioend:
 		if (ioend) {
 			error = wpc->ops->writeback_submit(wpc, 0);
@@ -311,9 +319,19 @@ static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
 	if (!atomic_dec_and_test(&ioend->io_remaining))
 		return 0;
 
+	if (!ioend->io_error &&
+	    bio_integrity(&ioend->io_bio) &&
+	    bio_op(&ioend->io_bio) == REQ_OP_READ) {
+		ioend->io_error = fs_bio_integrity_verify(&ioend->io_bio,
+				ioend->io_sector, ioend->io_size);
+	}
+
 	if (ioend->io_flags & IOMAP_IOEND_DIRECT)
 		return iomap_finish_ioend_direct(ioend);
-	return iomap_finish_ioend_buffered(ioend);
+	if (bio_op(&ioend->io_bio) == REQ_OP_READ)
+		return iomap_finish_ioend_buffered_read(ioend);
+	return iomap_finish_ioend_buffered_write(ioend);
 }
 
 /*
fs/ntfs3/inode.c

@@ -605,63 +605,18 @@ static void ntfs_iomap_read_end_io(struct bio *bio)
 	bio_put(bio);
 }
 
-/*
- * Copied from iomap/bio.c.
- */
-static int ntfs_iomap_bio_read_folio_range(const struct iomap_iter *iter,
-		struct iomap_read_folio_ctx *ctx, size_t plen)
-{
-	struct folio *folio = ctx->cur_folio;
-	const struct iomap *iomap = &iter->iomap;
-	loff_t pos = iter->pos;
-	size_t poff = offset_in_folio(folio, pos);
-	loff_t length = iomap_length(iter);
-	sector_t sector;
-	struct bio *bio = ctx->read_ctx;
-
-	sector = iomap_sector(iomap, pos);
-	if (!bio || bio_end_sector(bio) != sector ||
-	    !bio_add_folio(bio, folio, plen, poff)) {
-		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
-		gfp_t orig_gfp = gfp;
-		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
-
-		if (bio)
-			submit_bio(bio);
-
-		if (ctx->rac) /* same as readahead_gfp_mask */
-			gfp |= __GFP_NORETRY | __GFP_NOWARN;
-		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
-				gfp);
-		/*
-		 * If the bio_alloc fails, try it again for a single page to
-		 * avoid having to deal with partial page reads.  This emulates
-		 * what do_mpage_read_folio does.
-		 */
-		if (!bio)
-			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
-		if (ctx->rac)
-			bio->bi_opf |= REQ_RAHEAD;
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_end_io = ntfs_iomap_read_end_io;
-		bio_add_folio_nofail(bio, folio, plen, poff);
-		ctx->read_ctx = bio;
-	}
-	return 0;
-}
-
-static void ntfs_iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
+static void ntfs_iomap_bio_submit_read(const struct iomap_iter *iter,
+		struct iomap_read_folio_ctx *ctx)
 {
 	struct bio *bio = ctx->read_ctx;
 
-	if (bio)
-		submit_bio(bio);
+	bio->bi_end_io = ntfs_iomap_read_end_io;
+	submit_bio(bio);
 }
 
 static const struct iomap_read_ops ntfs_iomap_bio_read_ops = {
-	.read_folio_range = ntfs_iomap_bio_read_folio_range,
+	.read_folio_range = iomap_bio_read_folio_range,
 	.submit_read = ntfs_iomap_bio_submit_read,
 };
 
 static int ntfs_read_folio(struct file *file, struct folio *folio)
fs/xfs/xfs_aops.c

@@ -22,6 +22,7 @@
 #include "xfs_icache.h"
 #include "xfs_zone_alloc.h"
 #include "xfs_rtgroup.h"
+#include <linux/bio-integrity.h>
 
 struct xfs_writepage_ctx {
 	struct iomap_writepage_ctx ctx;
@@ -661,6 +662,8 @@ xfs_zoned_writeback_submit(
 		bio_endio(&ioend->io_bio);
 		return error;
 	}
+	if (wpc->iomap.flags & IOMAP_F_INTEGRITY)
+		fs_bio_integrity_generate(&ioend->io_bio);
 	xfs_zone_alloc_and_submit(ioend, &XFS_ZWPC(wpc)->open_zone);
 	return 0;
 }
@@ -741,12 +744,45 @@ xfs_vm_bmap(
 	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
 }
 
+static void
+xfs_bio_submit_read(
+	const struct iomap_iter		*iter,
+	struct iomap_read_folio_ctx	*ctx)
+{
+	struct bio			*bio = ctx->read_ctx;
+
+	/* defer read completions to the ioend workqueue */
+	iomap_init_ioend(iter->inode, bio, ctx->read_ctx_file_offset, 0);
+	bio->bi_end_io = xfs_end_bio;
+	submit_bio(bio);
+}
+
+static const struct iomap_read_ops xfs_iomap_read_ops = {
+	.read_folio_range	= iomap_bio_read_folio_range,
+	.submit_read		= xfs_bio_submit_read,
+	.bio_set		= &iomap_ioend_bioset,
+};
+
+static inline const struct iomap_read_ops *
+xfs_get_iomap_read_ops(
+	const struct address_space	*mapping)
+{
+	struct xfs_inode		*ip = XFS_I(mapping->host);
+
+	if (bdev_has_integrity_csum(xfs_inode_buftarg(ip)->bt_bdev))
+		return &xfs_iomap_read_ops;
+	return &iomap_bio_read_ops;
+}
+
 STATIC int
 xfs_vm_read_folio(
-	struct file		*unused,
+	struct file		*file,
 	struct folio		*folio)
 {
-	iomap_bio_read_folio(folio, &xfs_read_iomap_ops);
+	struct iomap_read_folio_ctx ctx = { .cur_folio = folio };
+
+	ctx.ops = xfs_get_iomap_read_ops(folio->mapping);
+	iomap_read_folio(&xfs_read_iomap_ops, &ctx, NULL);
 	return 0;
 }
@@ -754,7 +790,10 @@ STATIC void
 xfs_vm_readahead(
 	struct readahead_control *rac)
 {
-	iomap_bio_readahead(rac, &xfs_read_iomap_ops);
+	struct iomap_read_folio_ctx ctx = { .rac = rac };
+
+	ctx.ops = xfs_get_iomap_read_ops(rac->mapping);
+	iomap_readahead(&xfs_read_iomap_ops, &ctx, NULL);
 }
 
 static int
fs/xfs/xfs_iomap.c

@@ -143,11 +143,14 @@ xfs_bmbt_to_iomap(
 	}
 	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
 	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
-	if (mapping_flags & IOMAP_DAX)
-		iomap->dax_dev = target->bt_daxdev;
-	else
-		iomap->bdev = target->bt_bdev;
 	iomap->flags = iomap_flags;
+	if (mapping_flags & IOMAP_DAX) {
+		iomap->dax_dev = target->bt_daxdev;
+	} else {
+		iomap->bdev = target->bt_bdev;
+		if (bdev_has_integrity_csum(iomap->bdev))
+			iomap->flags |= IOMAP_F_INTEGRITY;
+	}
 
 	/*
 	 * If the inode is dirty for datasync purposes, let iomap know so it
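Worth noting how the flag propagates: xfs_bmbt_to_iomap() only sets IOMAP_F_INTEGRITY when bdev_has_integrity_csum() reports that the device actually checksums data, and when CONFIG_BLK_DEV_INTEGRITY is disabled the iomap.h hunk below defines IOMAP_F_INTEGRITY as 0, so every test of the flag compiles away. The effective gating, condensed from this diff:

	if (bdev_has_integrity_csum(iomap->bdev))	/* always false if !CONFIG_BLK_DEV_INTEGRITY */
		iomap->flags |= IOMAP_F_INTEGRITY;

	/* later, in the I/O paths: */
	if (iomap->flags & IOMAP_F_INTEGRITY)		/* constant-folds to 0 when disabled */
		fs_bio_integrity_generate(bio);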
include/linux/bio-integrity.h

@@ -78,7 +78,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len,
 int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter);
 int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta);
 void bio_integrity_unmap_user(struct bio *bio);
-bool bio_integrity_prep(struct bio *bio);
+void bio_integrity_prep(struct bio *bio, unsigned int action);
 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done);
 void bio_integrity_trim(struct bio *bio);
 int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask);
@@ -104,9 +104,8 @@ static inline void bio_integrity_unmap_user(struct bio *bio)
 {
 }
 
-static inline bool bio_integrity_prep(struct bio *bio)
+static inline void bio_integrity_prep(struct bio *bio, unsigned int action)
 {
-	return true;
 }
 
 static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
@@ -144,5 +143,12 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
 
 void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer);
 void bio_integrity_free_buf(struct bio_integrity_payload *bip);
+void bio_integrity_setup_default(struct bio *bio);
+
+unsigned int fs_bio_integrity_alloc(struct bio *bio);
+void fs_bio_integrity_free(struct bio *bio);
+void fs_bio_integrity_generate(struct bio *bio);
+int fs_bio_integrity_verify(struct bio *bio, sector_t sector,
+		unsigned int size);
 
 #endif /* _LINUX_BIO_INTEGRITY_H */
include/linux/bio.h

@@ -474,7 +474,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 
-int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter);
+int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter, size_t maxlen);
 void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty);
 
 extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
include/linux/blk-integrity.h

@@ -8,11 +8,6 @@
 
 struct request;
 
-/*
- * Maximum contiguous integrity buffer allocation.
- */
-#define BLK_INTEGRITY_MAX_SIZE	SZ_2M
-
 enum blk_integrity_flags {
 	BLK_INTEGRITY_NOVERIFY		= 1 << 0,
 	BLK_INTEGRITY_NOGENERATE	= 1 << 1,
@@ -180,4 +175,27 @@ static inline struct bio_vec rq_integrity_vec(struct request *rq)
 }
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
+enum bio_integrity_action {
+	BI_ACT_BUFFER	= (1u << 0),	/* allocate buffer */
+	BI_ACT_CHECK	= (1u << 1),	/* generate / verify PI */
+	BI_ACT_ZERO	= (1u << 2),	/* zero buffer */
+};
+
+/**
+ * bio_integrity_action - return the integrity action needed for a bio
+ * @bio: bio to operate on
+ *
+ * Returns the mask of integrity actions (BI_ACT_*) that need to be performed
+ * for @bio.
+ */
+unsigned int __bio_integrity_action(struct bio *bio);
+static inline unsigned int bio_integrity_action(struct bio *bio)
+{
+	if (!blk_get_integrity(bio->bi_bdev->bd_disk))
+		return 0;
+	if (bio_integrity(bio))
+		return 0;
+	return __bio_integrity_action(bio);
+}
+
 #endif /* _LINUX_BLK_INTEGRITY_H */
include/linux/blkdev.h

@@ -1477,14 +1477,18 @@ static inline bool bdev_synchronous(struct block_device *bdev)
 	return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
 }
 
+static inline bool bdev_has_integrity_csum(struct block_device *bdev)
+{
+	struct queue_limits *lim = bdev_limits(bdev);
+
+	return IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+		lim->integrity.csum_type != BLK_INTEGRITY_CSUM_NONE;
+}
+
 static inline bool bdev_stable_writes(struct block_device *bdev)
 {
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
-	    q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
-		return true;
-	return q->limits.features & BLK_FEAT_STABLE_WRITES;
+	return bdev_has_integrity_csum(bdev) ||
+		(bdev_limits(bdev)->features & BLK_FEAT_STABLE_WRITES);
 }
 
 static inline bool blk_queue_write_cache(struct request_queue *q)
@@ -1877,6 +1881,24 @@ static inline int bio_split_rw_at(struct bio *bio,
 	return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
 }
 
+/*
+ * Maximum contiguous integrity buffer allocation.
+ */
+#define BLK_INTEGRITY_MAX_SIZE	SZ_2M
+
+/*
+ * Maximum size of I/O that needs a block layer integrity buffer.  Limited
+ * by the number of intervals for which we can fit the integrity buffer into
+ * the buffer size.  Because the buffer is a single segment it is also limited
+ * by the maximum segment size.
+ */
+static inline unsigned int max_integrity_io_size(struct queue_limits *lim)
+{
+	return min_t(unsigned int, lim->max_segment_size,
+		(BLK_INTEGRITY_MAX_SIZE / lim->integrity.metadata_size) <<
+			lim->integrity.interval_exp);
+}
+
 #define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }
 
 #endif /* _LINUX_BLKDEV_H */
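A worked example of the limit (device parameter values assumed for illustration):

	/*
	 * metadata_size = 8 bytes of PI per interval
	 * interval_exp  = 12 (4096-byte protection intervals)
	 *
	 *   (BLK_INTEGRITY_MAX_SIZE / 8) << 12
	 * = (SZ_2M / 8) << 12 = 262144 intervals * 4096 bytes = 1 GiB
	 *
	 * so the 2 MiB PI-buffer cap translates to 1 GiB of data, and
	 * lim->max_segment_size is almost always the binding limit.  With
	 * 512-byte intervals (interval_exp = 9) the cap drops to 128 MiB.
	 */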
include/linux/iomap.h

@@ -65,6 +65,8 @@ struct vm_fault;
  *
  * IOMAP_F_ATOMIC_BIO indicates that (write) I/O will be issued as an atomic
  * bio, i.e. set REQ_ATOMIC.
+ *
+ * IOMAP_F_INTEGRITY indicates that the filesystem handles integrity metadata.
  */
 #define IOMAP_F_NEW		(1U << 0)
 #define IOMAP_F_DIRTY		(1U << 1)
@@ -79,6 +81,11 @@ struct vm_fault;
 #define IOMAP_F_BOUNDARY	(1U << 6)
 #define IOMAP_F_ANON_WRITE	(1U << 7)
 #define IOMAP_F_ATOMIC_BIO	(1U << 8)
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+#define IOMAP_F_INTEGRITY	(1U << 9)
+#else
+#define IOMAP_F_INTEGRITY	0
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 /*
  * Flag reserved for file system specific usage
@@ -493,6 +500,7 @@ struct iomap_read_folio_ctx {
 	struct folio *cur_folio;
 	struct readahead_control *rac;
 	void *read_ctx;
+	loff_t read_ctx_file_offset;
 };
 
 struct iomap_read_ops {
@@ -512,7 +520,14 @@ struct iomap_read_ops {
 	 *
	 * This is optional.
	 */
-	void (*submit_read)(struct iomap_read_folio_ctx *ctx);
+	void (*submit_read)(const struct iomap_iter *iter,
+			struct iomap_read_folio_ctx *ctx);
+
+	/*
+	 * Optional, allows the filesystem to specify its own bio_set, so
+	 * that new bios can be allocated from the provided bio_set.
+	 */
+	struct bio_set *bio_set;
 };
 
 /*
@@ -598,6 +613,9 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
 extern struct bio_set iomap_ioend_bioset;
 
 #ifdef CONFIG_BLOCK
+int iomap_bio_read_folio_range(const struct iomap_iter *iter,
+		struct iomap_read_folio_ctx *ctx, size_t plen);
+
 extern const struct iomap_read_ops iomap_bio_read_ops;
 
 static inline void iomap_bio_read_folio(struct folio *folio,
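Putting the hooks together, a minimal sketch of a filesystem wiring up integrity-aware buffered reads (names with the myfs_ prefix are hypothetical; the pattern follows the XFS conversion above):

	static void myfs_submit_read(const struct iomap_iter *iter,
			struct iomap_read_folio_ctx *ctx)
	{
		struct bio *bio = ctx->read_ctx;

		/* defer verification out of bi_end_io context */
		iomap_init_ioend(iter->inode, bio, ctx->read_ctx_file_offset, 0);
		bio->bi_end_io = myfs_end_bio;		/* hypothetical end_io */
		submit_bio(bio);
	}

	static const struct iomap_read_ops myfs_iomap_read_ops = {
		.read_folio_range	= iomap_bio_read_folio_range,
		.submit_read		= myfs_submit_read,
		.bio_set		= &iomap_ioend_bioset,	/* read bios double as ioends */
	};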