mirror of
https://github.com/torvalds/linux.git
synced 2026-05-05 23:05:25 -04:00
bcachefs: Fix a bug with the journal_seq_blacklist mechanism
Previously, we would start doing btree updates before writing the first journal entry; if this was after an unclean shutdown, this could cause those btree updates to not be blacklisted. Also, move some code to headers for userspace debug tools. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in the repository history.
Committed by: Kent Overstreet
parent
00c24f53b5
commit
9f115ce9e9
@@ -597,34 +597,6 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
|
||||
bch2_btree_iter_reinit_node(iter, b);
|
||||
}
|
||||
|
||||
static struct nonce btree_nonce(struct bset *i, unsigned offset)
|
||||
{
|
||||
return (struct nonce) {{
|
||||
[0] = cpu_to_le32(offset),
|
||||
[1] = ((__le32 *) &i->seq)[0],
|
||||
[2] = ((__le32 *) &i->seq)[1],
|
||||
[3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
|
||||
}};
|
||||
}
|
||||
|
||||
static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
|
||||
{
|
||||
struct nonce nonce = btree_nonce(i, offset);
|
||||
|
||||
if (!offset) {
|
||||
struct btree_node *bn = container_of(i, struct btree_node, keys);
|
||||
unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
|
||||
|
||||
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
|
||||
bytes);
|
||||
|
||||
nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
|
||||
}
|
||||
|
||||
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
|
||||
vstruct_end(i) - (void *) i->_data);
|
||||
}
|
||||
|
||||
static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
|
||||
struct btree *b, struct bset *i,
|
||||
unsigned offset, int write)
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
#include "bkey_methods.h"
|
||||
#include "bset.h"
|
||||
#include "btree_locking.h"
|
||||
#include "checksum.h"
|
||||
#include "extents.h"
|
||||
#include "io_types.h"
|
||||
|
||||
@@ -82,6 +83,34 @@ static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline struct nonce btree_nonce(struct bset *i, unsigned offset)
|
||||
{
|
||||
return (struct nonce) {{
|
||||
[0] = cpu_to_le32(offset),
|
||||
[1] = ((__le32 *) &i->seq)[0],
|
||||
[2] = ((__le32 *) &i->seq)[1],
|
||||
[3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
|
||||
}};
|
||||
}
|
||||
|
||||
static inline void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
|
||||
{
|
||||
struct nonce nonce = btree_nonce(i, offset);
|
||||
|
||||
if (!offset) {
|
||||
struct btree_node *bn = container_of(i, struct btree_node, keys);
|
||||
unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
|
||||
|
||||
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
|
||||
bytes);
|
||||
|
||||
nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
|
||||
}
|
||||
|
||||
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
|
||||
vstruct_end(i) - (void *) i->_data);
|
||||
}
|
||||
|
||||
void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
|
||||
|
||||
void bch2_btree_build_aux_trees(struct btree *);
|
||||
|
||||
@@ -36,15 +36,6 @@
|
||||
* that bset, until that btree node is rewritten.
|
||||
*/
|
||||
|
||||
static unsigned
|
||||
blacklist_nr_entries(struct bch_sb_field_journal_seq_blacklist *bl)
|
||||
{
|
||||
return bl
|
||||
? ((vstruct_end(&bl->field) - (void *) &bl->start[0]) /
|
||||
sizeof(struct journal_seq_blacklist_entry))
|
||||
: 0;
|
||||
}
|
||||
|
||||
static unsigned sb_blacklist_u64s(unsigned nr)
|
||||
{
|
||||
struct bch_sb_field_journal_seq_blacklist *bl;
|
||||
|
||||
@@ -2,6 +2,15 @@
|
||||
#ifndef _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H
|
||||
#define _BCACHEFS_JOURNAL_SEQ_BLACKLIST_H
|
||||
|
||||
static inline unsigned
|
||||
blacklist_nr_entries(struct bch_sb_field_journal_seq_blacklist *bl)
|
||||
{
|
||||
return bl
|
||||
? ((vstruct_end(&bl->field) - (void *) &bl->start[0]) /
|
||||
sizeof(struct journal_seq_blacklist_entry))
|
||||
: 0;
|
||||
}
|
||||
|
||||
bool bch2_journal_seq_is_blacklisted(struct bch_fs *, u64, bool);
|
||||
int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64, u64);
|
||||
int bch2_blacklist_table_initialize(struct bch_fs *);
|
||||
|
||||
@@ -1039,6 +1039,11 @@ int bch2_fs_recovery(struct bch_fs *c)
|
||||
}
|
||||
|
||||
journal_seq += 4;
|
||||
|
||||
/*
|
||||
* The superblock needs to be written before we do any btree
|
||||
* node writes: it will be in the read_write() path
|
||||
*/
|
||||
}
|
||||
|
||||
ret = bch2_blacklist_table_initialize(c);
|
||||
|
||||
@@ -352,8 +352,8 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c)
|
||||
{
|
||||
bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
|
||||
|
||||
bch2_fs_read_only_async(c);
|
||||
bch2_journal_halt(&c->journal);
|
||||
bch2_fs_read_only_async(c);
|
||||
|
||||
wake_up(&bch_read_only_wait);
|
||||
return ret;
|
||||
@@ -410,6 +410,13 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
/*
|
||||
* We need to write out a journal entry before we start doing btree
|
||||
* updates, to ensure that on unclean shutdown new journal blacklist
|
||||
* entries are created:
|
||||
*/
|
||||
bch2_journal_meta(&c->journal);
|
||||
|
||||
clear_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
|
||||
|
||||
for_each_rw_member(ca, c, i)
|
||||
|
||||
Reference in New Issue
Block a user