Merge tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/linux

Pull fscrypt updates from Eric Biggers:

 - Various cleanups for the interface between fs/crypto/ and
   filesystems, from Christoph Hellwig

 - Simplify and optimize the implementation of v1 key derivation by
   using the AES library instead of the crypto_skcipher API

* tag 'fscrypt-for-linus' of git://git.kernel.org/pub/scm/fs/fscrypt/linux:
  fscrypt: use AES library for v1 key derivation
  ext4: use a byte granularity cursor in ext4_mpage_readpages
  fscrypt: pass a real sector_t to fscrypt_zeroout_range
  fscrypt: pass a byte length to fscrypt_zeroout_range
  fscrypt: pass a byte offset to fscrypt_zeroout_range
  fscrypt: pass a byte length to fscrypt_zeroout_range_inline_crypt
  fscrypt: pass a byte offset to fscrypt_zeroout_range_inline_crypt
  fscrypt: pass a byte offset to fscrypt_set_bio_crypt_ctx
  fscrypt: pass a byte offset to fscrypt_mergeable_bio
  fscrypt: pass a byte offset to fscrypt_generate_dun
  fscrypt: move fscrypt_set_bio_crypt_ctx_bh to buffer.c
  ext4, fscrypt: merge fscrypt_mergeable_bio_bh into io_submit_need_new_bio
  ext4: factor out a io_submit_need_new_bio helper
  ext4: open code fscrypt_set_bio_crypt_ctx_bh
  ext4: initialize the write hint in io_submit_init_bio
This commit is contained in:
Linus Torvalds
2026-04-13 17:29:12 -07:00
14 changed files with 120 additions and 213 deletions

View File

@@ -2667,6 +2667,21 @@ static void end_bio_bh_io_sync(struct bio *bio)
bio_put(bio);
}
/*
 * Attach an fscrypt inline-encryption context to @bio for the I/O described
 * by @bh.  The crypt context's starting position is computed in bytes from
 * the buffer_head's folio position plus its offset within the folio, and the
 * owning inode is taken from the folio's mapping.
 */
static void buffer_set_crypto_ctx(struct bio *bio, const struct buffer_head *bh,
gfp_t gfp_mask)
{
const struct address_space *mapping = folio_mapping(bh->b_folio);
/*
 * The ext4 journal (jbd2) can submit a buffer_head it directly created
 * for a non-pagecache page.  Such a folio has no mapping, so there is no
 * inode to derive an encryption context from; fscrypt doesn't care about
 * these, so just leave the bio without a crypt context.
 */
if (!mapping)
return;
fscrypt_set_bio_crypt_ctx(bio, mapping->host,
folio_pos(bh->b_folio) + bh_offset(bh), gfp_mask);
}
static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
enum rw_hint write_hint,
struct writeback_control *wbc)
@@ -2693,7 +2708,8 @@ static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
if (IS_ENABLED(CONFIG_FS_ENCRYPTION))
buffer_set_crypto_ctx(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_write_hint = write_hint;

View File

@@ -3,6 +3,7 @@ config FS_ENCRYPTION
bool "FS Encryption (Per-file encryption)"
select CRYPTO
select CRYPTO_SKCIPHER
select CRYPTO_LIB_AES
select CRYPTO_LIB_SHA256
select CRYPTO_LIB_SHA512
select KEYS
@@ -30,7 +31,6 @@ config FS_ENCRYPTION_ALGS
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_CTS
select CRYPTO_ECB
select CRYPTO_XTS
config FS_ENCRYPTION_INLINE_CRYPT

View File

@@ -70,11 +70,9 @@ static void fscrypt_zeroout_range_end_io(struct bio *bio)
}
static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
pgoff_t lblk, sector_t sector,
unsigned int len)
loff_t pos, sector_t sector,
u64 len)
{
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
struct fscrypt_zero_done done = {
.pending = ATOMIC_INIT(1),
.done = COMPLETION_INITIALIZER_ONSTACK(done.done),
@@ -89,18 +87,16 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
bio->bi_iter.bi_sector = sector;
bio->bi_private = &done;
bio->bi_end_io = fscrypt_zeroout_range_end_io;
fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_NOFS);
for (n = 0; n < BIO_MAX_VECS; n++) {
unsigned int blocks_this_page =
min(len, blocks_per_page);
unsigned int bytes_this_page = blocks_this_page << blockbits;
unsigned int bytes_this_page = min(len, PAGE_SIZE);
__bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
len -= blocks_this_page;
lblk += blocks_this_page;
len -= bytes_this_page;
pos += bytes_this_page;
sector += (bytes_this_page >> SECTOR_SHIFT);
if (!len || !fscrypt_mergeable_bio(bio, inode, lblk))
if (!len || !fscrypt_mergeable_bio(bio, inode, pos))
break;
}
@@ -117,31 +113,31 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
/**
* fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
* @inode: the file's inode
* @lblk: the first file logical block to zero out
* @pblk: the first filesystem physical block to zero out
* @len: number of blocks to zero out
* @pos: the first file position (in bytes) to zero out
* @sector: the first sector to zero out
* @len: bytes to zero out
*
* Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
* ciphertext blocks which decrypt to the all-zeroes block. The blocks must be
* both logically and physically contiguous. It's also assumed that the
* filesystem only uses a single block device, ->s_bdev.
* filesystem only uses a single block device, ->s_bdev. @len must be a
* multiple of the file system logical block size.
*
* Note that since each block uses a different IV, this involves writing a
* different ciphertext to each block; we can't simply reuse the same one.
*
* Return: 0 on success; -errno on failure.
*/
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
int fscrypt_zeroout_range(const struct inode *inode, loff_t pos,
sector_t sector, u64 len)
{
const struct fscrypt_inode_info *ci = fscrypt_get_inode_info_raw(inode);
const unsigned int du_bits = ci->ci_data_unit_bits;
const unsigned int du_size = 1U << du_bits;
const unsigned int du_per_page_bits = PAGE_SHIFT - du_bits;
const unsigned int du_per_page = 1U << du_per_page_bits;
u64 du_index = (u64)lblk << (inode->i_blkbits - du_bits);
u64 du_remaining = (u64)len << (inode->i_blkbits - du_bits);
sector_t sector = pblk << (inode->i_blkbits - SECTOR_SHIFT);
u64 du_index = pos >> du_bits;
u64 du_remaining = len >> du_bits;
struct page *pages[16]; /* write up to 16 pages at a time */
unsigned int nr_pages;
unsigned int i;
@@ -153,7 +149,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
return 0;
if (fscrypt_inode_uses_inline_crypto(inode))
return fscrypt_zeroout_range_inline_crypt(inode, lblk, sector,
return fscrypt_zeroout_range_inline_crypt(inode, pos, sector,
len);
BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);

View File

@@ -278,9 +278,6 @@ struct fscrypt_inode_info {
*/
u8 ci_data_unit_bits;
/* Cached value: log2 of number of data units per FS block */
u8 ci_data_units_per_block_bits;
/* Hashed inode number. Only set for IV_INO_LBLK_32 */
u32 ci_hashed_ino;

View File

@@ -268,14 +268,12 @@ bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
u64 lblk_num,
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
loff_t pos, u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
u64 index = lblk_num << ci->ci_data_units_per_block_bits;
union fscrypt_iv iv;
int i;
fscrypt_generate_iv(&iv, index, ci);
fscrypt_generate_iv(&iv, pos >> ci->ci_data_unit_bits, ci);
BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
@@ -287,7 +285,7 @@ static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
* fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
* @bio: a bio which will eventually be submitted to the file
* @inode: the file's inode
* @first_lblk: the first file logical block number in the I/O
* @pos: the first file position (in bytes) in the I/O
* @gfp_mask: memory allocation flags - these must be a waiting mask so that
* bio_crypt_set_ctx can't fail.
*
@@ -300,7 +298,7 @@ static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
* The encryption context will be freed automatically when the bio is freed.
*/
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
u64 first_lblk, gfp_t gfp_mask)
loff_t pos, gfp_t gfp_mask)
{
const struct fscrypt_inode_info *ci;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
@@ -309,61 +307,16 @@ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
return;
ci = fscrypt_get_inode_info_raw(inode);
fscrypt_generate_dun(ci, first_lblk, dun);
fscrypt_generate_dun(ci, pos, dun);
bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
/*
 * Extract the inode and logical block number from a buffer_head.
 *
 * Returns true and fills in *@inode_ret and *@lblk_num_ret when the
 * buffer_head belongs to a pagecache folio; returns false (leaving the
 * outputs untouched) when it does not.  The logical block number is computed
 * from the folio's byte position plus the bh's offset, shifted down by the
 * inode's block size bits.
 */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
const struct inode **inode_ret,
u64 *lblk_num_ret)
{
struct folio *folio = bh->b_folio;
const struct address_space *mapping;
const struct inode *inode;
/*
 * The ext4 journal (jbd2) can submit a buffer_head it directly created
 * for a non-pagecache page.  fscrypt doesn't care about these, and they
 * have no mapping to derive an inode from, so report failure.
 */
mapping = folio_mapping(folio);
if (!mapping)
return false;
inode = mapping->host;
*inode_ret = inode;
*lblk_num_ret = (folio_pos(folio) + bh_offset(bh)) >> inode->i_blkbits;
return true;
}
/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 * crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 * the context allocation can't fail
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.  If the buffer_head is not backed by
 * a pagecache folio (e.g. one created directly by jbd2), no crypt context is
 * set and the bio is left unchanged.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
const struct buffer_head *first_bh,
gfp_t gfp_mask)
{
const struct inode *inode;
u64 first_lblk;
if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
/**
* fscrypt_mergeable_bio() - test whether data can be added to a bio
* @bio: the bio being built up
* @inode: the inode for the next part of the I/O
* @next_lblk: the next file logical block number in the I/O
* @pos: the next file position (in bytes) in the I/O
*
* When building a bio which may contain data which should undergo inline
* encryption (or decryption) via fscrypt, filesystems should call this function
@@ -381,7 +334,7 @@ EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
* Return: true iff the I/O is mergeable
*/
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
u64 next_lblk)
loff_t pos)
{
const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
const struct fscrypt_inode_info *ci;
@@ -401,34 +354,11 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
if (bc->bc_key != ci->ci_enc_key.blk_key)
return false;
fscrypt_generate_dun(ci, next_lblk, next_dun);
fscrypt_generate_dun(ci, pos, next_dun);
return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.  A buffer_head with no pagecache
 * mapping has no encryption context, so it is mergeable only into a bio that
 * has no crypt context either.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
const struct buffer_head *next_bh)
{
const struct inode *inode;
u64 next_lblk;
if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
return !bio->bi_crypt_context;
return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
/**
* fscrypt_dio_supported() - check whether DIO (direct I/O) is supported on an
* inode, as far as encryption is concerned

View File

@@ -609,8 +609,6 @@ fscrypt_setup_encryption_info(struct inode *inode,
crypt_info->ci_data_unit_bits =
fscrypt_policy_du_bits(&crypt_info->ci_policy, inode);
crypt_info->ci_data_units_per_block_bits =
inode->i_blkbits - crypt_info->ci_data_unit_bits;
res = setup_file_encryption_key(crypt_info, need_dirhash_key, &mk);
if (res)

View File

@@ -20,11 +20,10 @@
* managed alongside the master keys in the filesystem-level keyring)
*/
#include <crypto/skcipher.h>
#include <crypto/aes.h>
#include <crypto/utils.h>
#include <keys/user-type.h>
#include <linux/hashtable.h>
#include <linux/scatterlist.h>
#include "fscrypt_private.h"
@@ -32,48 +31,6 @@
static DEFINE_HASHTABLE(fscrypt_direct_keys, 6); /* 6 bits = 64 buckets */
static DEFINE_SPINLOCK(fscrypt_direct_keys_lock);
/*
 * v1 key derivation function. This generates the derived key by encrypting the
 * master key with AES-128-ECB using the nonce as the AES key. This provides a
 * unique derived key with sufficient entropy for each inode. However, it's
 * nonstandard, non-extensible, doesn't evenly distribute the entropy from the
 * master key, and is trivially reversible: an attacker who compromises a
 * derived key can "decrypt" it to get back to the master key, then derive any
 * other key. For all new code, use HKDF instead.
 *
 * The master key must be at least as long as the derived key. If the master
 * key is longer, then only the first 'derived_keysize' bytes are used.
 *
 * @master_key: the raw master key (plaintext input to the encryption)
 * @nonce: the per-file nonce, used as the AES-128 key
 * @derived_key: output buffer, receives 'derived_keysize' bytes of ciphertext
 * @derived_keysize: number of bytes to derive
 *
 * Return: 0 on success; -errno on failure (tfm allocation, setkey, or
 * encryption error).
 */
static int derive_key_aes(const u8 *master_key,
const u8 nonce[FSCRYPT_FILE_NONCE_SIZE],
u8 *derived_key, unsigned int derived_keysize)
{
struct crypto_sync_skcipher *tfm;
int err;
tfm = crypto_alloc_sync_skcipher("ecb(aes)", 0, FSCRYPT_CRYPTOAPI_MASK);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
/* Note the role reversal: the *nonce* is the AES key here. */
err = crypto_sync_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE);
if (err == 0) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist src_sg, dst_sg;
skcipher_request_set_callback(req,
CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
sg_init_one(&src_sg, master_key, derived_keysize);
sg_init_one(&dst_sg, derived_key, derived_keysize);
skcipher_request_set_crypt(req, &src_sg, &dst_sg,
derived_keysize, NULL);
err = crypto_skcipher_encrypt(req);
}
crypto_free_sync_skcipher(tfm);
return err;
}
/*
* Search the current task's subscribed keyrings for a "logon" key with
* description prefix:descriptor, and if found acquire a read lock on it and
@@ -255,29 +212,41 @@ static int setup_v1_file_key_direct(struct fscrypt_inode_info *ci,
return 0;
}
/* v1 policy, !DIRECT_KEY: derive the file's encryption key */
/*
* v1 policy, !DIRECT_KEY: derive the file's encryption key.
*
* The v1 key derivation function generates the derived key by encrypting the
* master key with AES-128-ECB using the file's nonce as the AES key. This
* provides a unique derived key with sufficient entropy for each inode.
* However, it's nonstandard, non-extensible, doesn't evenly distribute the
* entropy from the master key, and is trivially reversible: an attacker who
* compromises a derived key can "decrypt" it to get back to the master key,
* then derive any other key. For all new code, use HKDF instead.
*
* The master key must be at least as long as the derived key. If the master
* key is longer, then only the first ci->ci_mode->keysize bytes are used.
*/
static int setup_v1_file_key_derived(struct fscrypt_inode_info *ci,
const u8 *raw_master_key)
{
u8 *derived_key;
const unsigned int derived_keysize = ci->ci_mode->keysize;
u8 derived_key[FSCRYPT_MAX_RAW_KEY_SIZE];
struct aes_enckey aes;
int err;
/*
* This cannot be a stack buffer because it will be passed to the
* scatterlist crypto API during derive_key_aes().
*/
derived_key = kmalloc(ci->ci_mode->keysize, GFP_KERNEL);
if (!derived_key)
return -ENOMEM;
if (WARN_ON_ONCE(derived_keysize > FSCRYPT_MAX_RAW_KEY_SIZE ||
derived_keysize % AES_BLOCK_SIZE != 0))
return -EINVAL;
err = derive_key_aes(raw_master_key, ci->ci_nonce,
derived_key, ci->ci_mode->keysize);
if (err)
goto out;
static_assert(FSCRYPT_FILE_NONCE_SIZE == AES_KEYSIZE_128);
aes_prepareenckey(&aes, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE);
for (unsigned int i = 0; i < derived_keysize; i += AES_BLOCK_SIZE)
aes_encrypt(&aes, &derived_key[i], &raw_master_key[i]);
err = fscrypt_set_per_file_enc_key(ci, derived_key);
out:
kfree_sensitive(derived_key);
memzero_explicit(derived_key, derived_keysize);
/* No need to zeroize 'aes', as its key is not secret. */
return err;
}

View File

@@ -417,7 +417,10 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
KUNIT_STATIC_STUB_REDIRECT(ext4_issue_zeroout, inode, lblk, pblk, len);
if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
return fscrypt_zeroout_range(inode, lblk, pblk, len);
return fscrypt_zeroout_range(inode,
(loff_t)lblk << inode->i_blkbits,
pblk << (inode->i_blkbits - SECTOR_SHIFT),
(u64)len << inode->i_blkbits);
ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
if (ret > 0)

View File

@@ -416,6 +416,8 @@ void ext4_io_submit_init(struct ext4_io_submit *io,
}
static void io_submit_init_bio(struct ext4_io_submit *io,
struct inode *inode,
struct folio *folio,
struct buffer_head *bh)
{
struct bio *bio;
@@ -425,30 +427,42 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
* __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
*/
bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
fscrypt_set_bio_crypt_ctx(bio, inode, folio_pos(folio) + bh_offset(bh),
GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
bio->bi_write_hint = inode->i_write_hint;
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
wbc_init_bio(io->io_wbc, bio);
}
/*
 * Decide whether @bh can be appended to the bio being built in @io, or
 * whether the current bio must be submitted and a new one started.  A new
 * bio is needed when the buffer is not physically contiguous with the bio
 * (its block number differs from io->io_next_block) or when fscrypt inline
 * encryption cannot merge it (different key or non-contiguous DUN).
 */
static bool io_submit_need_new_bio(struct ext4_io_submit *io,
struct inode *inode,
struct folio *folio,
struct buffer_head *bh)
{
if (bh->b_blocknr != io->io_next_block)
return true;
if (!fscrypt_mergeable_bio(io->io_bio, inode,
folio_pos(folio) + bh_offset(bh)))
return true;
return false;
}
static void io_submit_add_bh(struct ext4_io_submit *io,
struct inode *inode,
struct folio *folio,
struct folio *io_folio,
struct buffer_head *bh)
{
if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
!fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
if (io->io_bio && io_submit_need_new_bio(io, inode, folio, bh)) {
submit_and_retry:
ext4_io_submit(io);
}
if (io->io_bio == NULL) {
io_submit_init_bio(io, bh);
io->io_bio->bi_write_hint = inode->i_write_hint;
}
if (io->io_bio == NULL)
io_submit_init_bio(io, inode, folio, bh);
if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
goto submit_and_retry;
wbc_account_cgroup_owner(io->io_wbc, folio, bh->b_size);

View File

@@ -215,11 +215,11 @@ static int ext4_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
sector_t last_block_in_bio = 0;
const unsigned blkbits = inode->i_blkbits;
const unsigned blocksize = 1 << blkbits;
sector_t next_block;
sector_t block_in_file;
sector_t last_block;
sector_t last_block_in_file;
sector_t first_block;
loff_t pos;
unsigned page_block;
struct block_device *bdev = inode->i_sb->s_bdev;
int length;
@@ -249,7 +249,8 @@ static int ext4_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
blocks_per_folio = folio_size(folio) >> blkbits;
first_hole = blocks_per_folio;
block_in_file = next_block = EXT4_PG_TO_LBLK(inode, folio->index);
pos = folio_pos(folio);
block_in_file = pos >> blkbits;
last_block = EXT4_PG_TO_LBLK(inode, folio->index + nr_pages);
last_block_in_file = (ext4_readpage_limit(inode) +
blocksize - 1) >> blkbits;
@@ -342,7 +343,7 @@ static int ext4_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
* BIO off first?
*/
if (bio && (last_block_in_bio != first_block - 1 ||
!fscrypt_mergeable_bio(bio, inode, next_block))) {
!fscrypt_mergeable_bio(bio, inode, pos))) {
submit_and_realloc:
blk_crypto_submit_bio(bio);
bio = NULL;
@@ -354,8 +355,7 @@ static int ext4_mpage_readpages(struct inode *inode, struct fsverity_info *vi,
*/
bio = bio_alloc(bdev, bio_max_segs(nr_pages),
REQ_OP_READ, GFP_KERNEL);
fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
GFP_KERNEL);
fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_KERNEL);
ext4_set_bio_post_read_ctx(bio, inode, vi);
bio->bi_iter.bi_sector = first_block << (blkbits - 9);
bio->bi_end_io = mpage_end_io;

View File

@@ -527,7 +527,9 @@ static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
* read/write raw data without encryption.
*/
if (!fio || !fio->encrypted_page)
fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
fscrypt_set_bio_crypt_ctx(bio, inode,
(loff_t)first_idx << inode->i_blkbits,
gfp_mask);
}
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -541,7 +543,8 @@ static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
if (fio && fio->encrypted_page)
return !bio_has_crypt_ctx(bio);
return fscrypt_mergeable_bio(bio, inode, next_idx);
return fscrypt_mergeable_bio(bio, inode,
(loff_t)next_idx << inode->i_blkbits);
}
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,

View File

@@ -4162,7 +4162,9 @@ static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
if (IS_ENCRYPTED(inode))
ret = fscrypt_zeroout_range(inode, off, block, len);
ret = fscrypt_zeroout_range(inode,
(loff_t)off << inode->i_blkbits, sector,
(u64)len << inode->i_blkbits);
else
ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
GFP_NOFS, 0);

View File

@@ -315,8 +315,7 @@ static int iomap_dio_zero(const struct iomap_iter *iter, struct iomap_dio *dio,
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs,
REQ_OP_WRITE | REQ_SYNC | REQ_IDLE);
fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
GFP_KERNEL);
fscrypt_set_bio_crypt_ctx(bio, inode, pos, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
@@ -346,8 +345,7 @@ static ssize_t iomap_dio_bio_iter_one(struct iomap_iter *iter,
nr_vecs = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, op);
fscrypt_set_bio_crypt_ctx(bio, iter->inode,
pos >> iter->inode->i_blkbits, GFP_KERNEL);
fscrypt_set_bio_crypt_ctx(bio, iter->inode, pos, GFP_KERNEL);
bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos);
bio->bi_write_hint = iter->inode->i_write_hint;
bio->bi_ioprio = dio->iocb->ki_ioprio;

View File

@@ -450,8 +450,8 @@ u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
/* bio.c */
bool fscrypt_decrypt_bio(struct bio *bio);
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len);
int fscrypt_zeroout_range(const struct inode *inode, loff_t pos,
sector_t sector, u64 len);
/* hooks.c */
int fscrypt_file_open(struct inode *inode, struct file *filp);
@@ -755,8 +755,8 @@ static inline bool fscrypt_decrypt_bio(struct bio *bio)
return true;
}
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
static inline int fscrypt_zeroout_range(const struct inode *inode, loff_t pos,
sector_t sector, u64 len)
{
return -EOPNOTSUPP;
}
@@ -865,19 +865,11 @@ static inline void fscrypt_set_ops(struct super_block *sb,
bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode);
void fscrypt_set_bio_crypt_ctx(struct bio *bio,
const struct inode *inode, u64 first_lblk,
gfp_t gfp_mask);
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
const struct buffer_head *first_bh,
gfp_t gfp_mask);
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
loff_t pos, gfp_t gfp_mask);
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
u64 next_lblk);
bool fscrypt_mergeable_bio_bh(struct bio *bio,
const struct buffer_head *next_bh);
loff_t pos);
bool fscrypt_dio_supported(struct inode *inode);
@@ -892,22 +884,11 @@ static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio,
const struct inode *inode,
u64 first_lblk, gfp_t gfp_mask) { }
static inline void fscrypt_set_bio_crypt_ctx_bh(
struct bio *bio,
const struct buffer_head *first_bh,
gfp_t gfp_mask) { }
loff_t pos, gfp_t gfp_mask) { }
static inline bool fscrypt_mergeable_bio(struct bio *bio,
const struct inode *inode,
u64 next_lblk)
{
return true;
}
static inline bool fscrypt_mergeable_bio_bh(struct bio *bio,
const struct buffer_head *next_bh)
loff_t pos)
{
return true;
}