// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#include "dm-audit.h"

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS	32768
#define DEFAULT_JOURNAL_SIZE_FACTOR	7
#define DEFAULT_SECTORS_PER_BITMAP_BIT	32768
#define DEFAULT_BUFFER_SECTORS		128
#define DEFAULT_JOURNAL_WATERMARK	50
#define DEFAULT_SYNC_MSEC		10000
#define DEFAULT_MAX_JOURNAL_SECTORS	(IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
#define MIN_LOG2_INTERLEAVE_SECTORS	3
#define MAX_LOG2_INTERLEAVE_SECTORS	31
#define METADATA_WORKQUEUE_MAX_ACTIVE	16
#define RECALC_SECTORS			(IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
#define RECALC_WRITE_SUPER		16
#define BITMAP_BLOCK_SIZE		4096	/* don't change it */
#define BITMAP_FLUSH_INTERVAL		(10 * HZ)
#define DISCARD_FILLER			0xf6
#define SALT_SIZE			16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC			"integrt"
#define SB_VERSION_1			1
#define SB_VERSION_2			2
#define SB_VERSION_3			3
#define SB_VERSION_4			4
#define SB_VERSION_5			5
#define SB_SECTORS			8
#define MAX_SECTORS_PER_BLOCK		8

struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__le16 integrity_tag_size;
	__le32 journal_sections;
	__le64 provided_data_sectors;	/* userspace uses this value */
	__le32 flags;
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;
	__u8 pad[2];
	__le64 recalc_sector;
	__u8 pad2[8];
	__u8 salt[SALT_SIZE];
};

#define SB_FLAG_HAVE_JOURNAL_MAC	0x1
#define SB_FLAG_RECALCULATING		0x2
#define SB_FLAG_DIRTY_BITMAP		0x4
#define SB_FLAG_FIXED_PADDING		0x8
#define SB_FLAG_FIXED_HMAC		0x10

#define	JOURNAL_ENTRY_ROUNDUP		8

typedef __le64 commit_id_t;

#define JOURNAL_MAC_PER_SECTOR		8

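/*
 * One journal entry describes one data block written through the journal:
 * u.sector is the target sector and last_bytes[] holds, for each data sector
 * of the entry, the bytes that are displaced by the per-sector commit_id in
 * the journal data area. An optional integrity tag follows last_bytes[]
 * (see journal_entry_tag()).
 */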
struct journal_entry {
	union {
		struct {
			__le32 sector_lo;
			__le32 sector_hi;
		} s;
		__le64 sector;
	} u;
	commit_id_t last_bytes[];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

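/*
 * sector_hi doubles as the entry state marker (-1 = unused, -2 = in progress),
 * so journal_entry_set_sector() publishes the low half first and then stores
 * the high half with WRITE_ONCE() after a write barrier: a reader that sees a
 * valid sector_hi sees a fully written entry.
 */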
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)	do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)	do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif

#define journal_entry_get_sector(je)		le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)		((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)		((je)->u.s.sector_hi = cpu_to_le32(-1))
#define journal_entry_is_inprogress(je)		((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)	((je)->u.s.sector_hi = cpu_to_le32(-2))

#define JOURNAL_BLOCK_SECTORS		8
#define JOURNAL_SECTOR_DATA		((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE		(JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	struct_group(sectors,
		__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
		__u8 mac[JOURNAL_MAC_PER_SECTOR];
	);
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE			(JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS	8

#define N_COMMIT_IDS			4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned int key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned int tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned int journal_pages;
	unsigned int n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned int bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned int journal_sections;
	unsigned int journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned int initial_sectors;
	unsigned int metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned int committed_section;
	unsigned int n_committed_sections;

	unsigned int uncommitted_section;
	unsigned int n_uncommitted_sections;

	unsigned int free_section;
	unsigned char free_section_entry;
	unsigned int free_sectors;

	unsigned int free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned int autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool wrote_to_journal;
	bool journal_uptodate;
	bool just_formatted;
	bool recalculate_flag;
	bool reset_recalculate_flag;
	bool discard;
	bool fix_padding;
	bool fix_hmac;
	bool legacy_recalculate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

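/*
 * An in-flight region of the device. Active ranges live in the in_progress
 * rb-tree; ranges that could not be added because they overlap an active one
 * sit on wait_list until remove_range_unlocked() retries them.
 */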
struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	enum req_op op;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned int metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned int idx;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;

};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL	32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)			printk(KERN_DEBUG x, ##__VA_ARGS__)
#define DEBUG_bytes(bytes, len, msg, ...)	printk(KERN_DEBUG msg "%s%*ph\n", ##__VA_ARGS__, \
						       len ? ": " : "", len, bytes)
#else
#define DEBUG_print(x, ...)			do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)	do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile, protection is performed by the layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name			= "DM-DIF-EXT-TAG",
	.generate_fn		= NULL,
	.verify_fn		= NULL,
	.prepare_fn		= dm_integrity_prepare,
	.complete_fn		= dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
	if (ic->legacy_recalculate)
		return false;
	if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
	    ic->internal_hash_alg.key || ic->journal_mac_alg.key :
	    ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
		return true;
	return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
					  unsigned int j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

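/*
 * Without a separate metadata device the data device is split into "areas" of
 * 2^log2_interleave_sectors data sectors, each preceded by its own run of
 * metadata sectors; these helpers translate a logical data sector into the
 * (area, offset) pair used by that layout. With a meta_dev there is a single
 * area and the offset equals the data sector.
 */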
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1));	\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

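/*
 * Map an (area, offset) pair to the metadata location of its tags: the return
 * value is the metadata block (in units of the buffer size) and
 * *metadata_offset is the byte offset of the first tag inside that block.
 */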
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned int *metadata_offset)
{
	__u64 ms;
	unsigned int mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
		ic->sb->version = SB_VERSION_5;
	else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

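/*
 * Compute or verify the superblock HMAC (SB_FLAG_FIXED_HMAC). The MAC covers
 * the first 512-byte sector minus its last "digest size" bytes and is stored
 * in those trailing bytes; wr selects write (store) vs. read (compare).
 */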
static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned int size = crypto_shash_digestsize(ic->journal_mac);

	if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
		return -EINVAL;
	}

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		return r;
	}

	r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		return r;
	}

	if (likely(wr)) {
		r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			return r;
		}
	} else {
		__u8 result[HASH_MAX_DIGESTSIZE];

		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			return r;
		}
		if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
			dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
			return -EILSEQ;
		}
	}

	return 0;
}

static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	const enum req_op op = opf & REQ_OP_MASK;
	int r;

	io_req.bi_opf = opf;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE) {
		sb_set_version(ic);
		if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, true);
			if (unlikely(r))
				return r;
		}
	}

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r))
		return r;

	if (op == REQ_OP_READ) {
		if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, false);
			if (unlikely(r))
				return r;
		}
	}

	return 0;
}

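/*
 * Bitmap operations for bitmap ('B') mode. One bit covers
 * 2^(log2_sectors_per_block + log2_blocks_per_bitmap_bit) sectors;
 * block_bitmap_op() tests, sets or clears all bits covering the given
 * sector range, walking the bitmap page by page.
 */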
#define BITMAP_OP_TEST_ALL_SET		0
#define BITMAP_OP_TEST_ALL_CLEAR	1
#define BITMAP_OP_SET			2
#define BITMAP_OP_CLEAR			3

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
		       sector,
		       n_sectors,
		       ic->sb->log2_sectors_per_block,
		       ic->log2_blocks_per_bitmap_bit,
		       mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page)
		this_end_bit = PAGE_SIZE * 8 - 1;
	else
		this_end_bit = end_bit;

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else {
			while (bit <= this_end_bit) {
				if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
					do {
						data[bit / BITS_PER_LONG] = 0;
						bit += BITS_PER_LONG;
					} while (this_end_bit >= bit + BITS_PER_LONG - 1);
					continue;
				}
				__clear_bit(bit, data);
				bit++;
			}
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned int i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);

		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}

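/*
 * Journal layout helpers. A journal section is journal_section_sectors long:
 * the first JOURNAL_BLOCK_SECTORS sectors hold the journal entries (with a
 * per-sector MAC slice and commit_id at the end of each sector), followed by
 * the journaled data blocks. access_journal*() translate (section, index)
 * into the corresponding location in the journal page list.
 */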
static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
			       unsigned int *pl_index, unsigned int *pl_offset)
{
	unsigned int sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
						unsigned int section, unsigned int offset, unsigned int *n_sectors)
{
	unsigned int pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
	unsigned int rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

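/*
 * Compute the MAC over one journal section: the HMAC of all entry sector
 * numbers in the section (prefixed with the superblock salt and the section
 * number when SB_FLAG_FIXED_HMAC is set). rw_section_mac() spreads the result
 * across the mac[] fields of the section's entry sectors, or verifies them
 * on read.
 */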
static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned int j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		__le64 section_le;

		r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}

		section_le = cpu_to_le64(section);
		r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof(section_le));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);

		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned int j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
				dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
			}
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;

	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

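/*
 * Encrypt/decrypt the journal by XOR-ing it with the pre-computed keystream
 * pages in ic->journal_xor, offloading the XOR to the async_tx engine. One
 * reference per submitted page is taken on comp->in_flight and dropped by
 * complete_journal_op() as each XOR finishes.
 */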
static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			unsigned int n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned int pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned int dummy;

			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(void *data, int err)
{
	struct journal_completion *comp = data;

	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

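/*
 * Submit one skcipher request. Returns true if the request is still in
 * flight (the callback will complete it later), false if it finished
 * synchronously or failed; -EBUSY waits for the crypto_backoff completion
 * signalled by complete_journal_encrypt() and then counts as asynchronous.
 */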
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			  unsigned int n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned int ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
			    unsigned int n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;

	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
			       unsigned int sector, unsigned int n_sectors,
			       struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned int pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_opf = opf;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
				      "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
		       unsigned int section, unsigned int n_sections,
		       struct journal_completion *comp)
{
	unsigned int sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}

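/*
 * Flush [commit_start, commit_start + commit_sections) to disk. The range may
 * wrap past the end of the journal, in which case the two pieces are
 * encrypted (or MAC'd) and written with separate FUA requests; the final
 * wait covers all of them via io_comp.in_flight.
 */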
static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned int i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned int to_end;

		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
					   commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
			      unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned int sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_opf = REQ_OP_WRITE;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

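/*
 * Range locking: add_new_range() inserts a range into the in_progress rb-tree
 * and returns false if it overlaps an active range (or, with check_waiting,
 * one already queued on wait_list). Callers that must not proceed until the
 * range is free use add_new_range_and_wait()/wait_and_add_new_range(), which
 * sleep until remove_range_unlocked() re-adds them.
 */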
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
|
|
|
|
{
|
|
|
|
return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
|
2019-04-05 19:26:39 +00:00
|
|
|
range1->logical_sector + range1->n_sectors > range2->logical_sector;
|
2018-07-03 18:13:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
|
|
|
struct rb_node **n = &ic->in_progress.rb_node;
|
|
|
|
struct rb_node *parent;
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));
|
2017-04-18 20:51:52 +00:00
|
|
|
|
2018-07-03 18:13:27 +00:00
|
|
|
if (likely(check_waiting)) {
|
|
|
|
struct dm_integrity_range *range;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2018-07-03 18:13:27 +00:00
|
|
|
list_for_each_entry(range, &ic->wait_list, wait_entry) {
|
|
|
|
if (unlikely(ranges_overlap(range, new_range)))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
parent = NULL;
|
|
|
|
|
|
|
|
while (*n) {
|
|
|
|
struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
|
|
|
|
|
|
|
|
parent = *n;
|
2023-02-02 16:10:52 +00:00
|
|
|
if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector)
|
2017-01-04 19:23:53 +00:00
|
|
|
n = &range->node.rb_left;
|
2023-02-02 16:10:52 +00:00
|
|
|
else if (new_range->logical_sector >= range->logical_sector + range->n_sectors)
|
2017-01-04 19:23:53 +00:00
|
|
|
n = &range->node.rb_right;
|
2023-02-02 16:10:52 +00:00
|
|
|
else
|
2017-01-04 19:23:53 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&new_range->node, parent, n);
|
|
|
|
rb_insert_color(&new_range->node, &ic->in_progress);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
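/*
 * Remove a completed range from the in-progress tree, then re-add waiters
 * from the head of the wait list until one of them still overlaps, waking
 * each task whose range was added. Called with endio_wait.lock held.
 */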
static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
|
|
|
|
{
|
|
|
|
rb_erase(&range->node, &ic->in_progress);
|
2018-07-03 18:13:27 +00:00
|
|
|
while (unlikely(!list_empty(&ic->wait_list))) {
|
|
|
|
struct dm_integrity_range *last_range =
|
|
|
|
list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
|
|
|
|
struct task_struct *last_range_task;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2018-07-03 18:13:27 +00:00
|
|
|
last_range_task = last_range->task;
|
|
|
|
list_del(&last_range->wait_entry);
|
|
|
|
if (!add_new_range(ic, last_range, false)) {
|
|
|
|
last_range->task = last_range_task;
|
|
|
|
list_add(&last_range->wait_entry, &ic->wait_list);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
last_range->waiting = false;
|
|
|
|
wake_up_process(last_range_task);
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ic->endio_wait.lock, flags);
|
|
|
|
remove_range_unlocked(ic, range);
|
|
|
|
spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
|
|
|
|
}
|
|
|
|
|
2018-07-03 18:13:27 +00:00
|
|
|
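/*
 * Queue the new range on the wait list and sleep until
 * remove_range_unlocked() manages to insert it. Called with
 * endio_wait.lock held; the lock is dropped while sleeping.
 */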
static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
|
|
|
|
{
|
|
|
|
new_range->waiting = true;
|
|
|
|
list_add_tail(&new_range->wait_entry, &ic->wait_list);
|
|
|
|
new_range->task = current;
|
|
|
|
do {
|
|
|
|
__set_current_state(TASK_UNINTERRUPTIBLE);
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
io_schedule();
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
} while (unlikely(new_range->waiting));
|
|
|
|
}
|
|
|
|
|
2019-04-29 12:57:22 +00:00
|
|
|
static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
|
|
|
|
{
|
|
|
|
if (unlikely(!add_new_range(ic, new_range, true)))
|
|
|
|
wait_and_add_new_range(ic, new_range);
|
|
|
|
}
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
static void init_journal_node(struct journal_node *node)
|
|
|
|
{
|
|
|
|
RB_CLEAR_NODE(&node->node);
|
|
|
|
node->sector = (sector_t)-1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
|
|
|
|
{
|
|
|
|
struct rb_node **link;
|
|
|
|
struct rb_node *parent;
|
|
|
|
|
|
|
|
node->sector = sector;
|
|
|
|
BUG_ON(!RB_EMPTY_NODE(&node->node));
|
|
|
|
|
|
|
|
link = &ic->journal_tree_root.rb_node;
|
|
|
|
parent = NULL;
|
|
|
|
|
|
|
|
while (*link) {
|
|
|
|
struct journal_node *j;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
parent = *link;
|
|
|
|
j = container_of(parent, struct journal_node, node);
|
|
|
|
if (sector < j->sector)
|
|
|
|
link = &j->node.rb_left;
|
|
|
|
else
|
|
|
|
link = &j->node.rb_right;
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&node->node, parent, link);
|
|
|
|
rb_insert_color(&node->node, &ic->journal_tree_root);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
|
|
|
|
{
|
|
|
|
BUG_ON(RB_EMPTY_NODE(&node->node));
|
|
|
|
rb_erase(&node->node, &ic->journal_tree_root);
|
|
|
|
init_journal_node(node);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define NOT_FOUND (-1U)
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
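/*
 * Find a journal entry that maps the given logical sector. Returns its
 * index in the journal tree or NOT_FOUND, and stores the lowest mapped
 * sector greater than "sector" (or -1 if none) in *next_sector.
 */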
static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
|
|
|
struct rb_node *n = ic->journal_tree_root.rb_node;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int found = NOT_FOUND;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
*next_sector = (sector_t)-1;
|
|
|
|
while (n) {
|
|
|
|
struct journal_node *j = container_of(n, struct journal_node, node);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2023-02-02 16:10:52 +00:00
|
|
|
if (sector == j->sector)
|
2017-01-04 19:23:53 +00:00
|
|
|
found = j - ic->journal_tree;
|
2023-02-02 16:10:52 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
if (sector < j->sector) {
|
|
|
|
*next_sector = j->sector;
|
|
|
|
n = j->node.rb_left;
|
2023-02-02 16:10:52 +00:00
|
|
|
} else
|
2017-01-04 19:23:53 +00:00
|
|
|
n = j->node.rb_right;
|
|
|
|
}
|
|
|
|
|
|
|
|
return found;
|
|
|
|
}
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
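/*
 * Return true if journal entry "pos" still maps the given sector and no
 * later journal entry maps the same sector.
 */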
static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
|
|
|
struct journal_node *node, *next_node;
|
|
|
|
struct rb_node *next;
|
|
|
|
|
|
|
|
if (unlikely(pos >= ic->journal_entries))
|
|
|
|
return false;
|
|
|
|
node = &ic->journal_tree[pos];
|
|
|
|
if (unlikely(RB_EMPTY_NODE(&node->node)))
|
|
|
|
return false;
|
|
|
|
if (unlikely(node->sector != sector))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
next = rb_next(&node->node);
|
|
|
|
if (unlikely(!next))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
next_node = container_of(next, struct journal_node, node);
|
|
|
|
return next_node->sector != sector;
|
|
|
|
}
|
|
|
|
|
|
|
|
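/*
 * Return true if a later journal entry for the same sector has already
 * been committed; such an entry supersedes "node" during journal
 * write-back.
 */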
static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
|
|
|
|
{
|
|
|
|
struct rb_node *next;
|
|
|
|
struct journal_node *next_node;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int next_section;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
BUG_ON(RB_EMPTY_NODE(&node->node));
|
|
|
|
|
|
|
|
next = rb_next(&node->node);
|
|
|
|
if (unlikely(!next))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
next_node = container_of(next, struct journal_node, node);
|
|
|
|
|
|
|
|
if (next_node->sector != node->sector)
|
|
|
|
return false;
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
|
2017-01-04 19:23:53 +00:00
|
|
|
if (next_section >= ic->committed_section &&
|
|
|
|
next_section < ic->committed_section + ic->n_committed_sections)
|
|
|
|
return true;
|
|
|
|
if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define TAG_READ 0
|
|
|
|
#define TAG_WRITE 1
|
|
|
|
#define TAG_CMP 2
|
|
|
|
|
|
|
|
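/*
 * Read, write or compare "total_size" bytes of tag data starting at
 * *metadata_block/*metadata_offset, advancing both across buffer
 * boundaries. Returns 0 on success, a negative error code on I/O failure,
 * or a positive value when TAG_CMP detects a mismatch (used by callers to
 * locate the failing sector).
 */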
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int *metadata_offset, unsigned int total_size, int op)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
2020-03-22 19:42:26 +00:00
|
|
|
#define MAY_BE_FILLER 1
|
|
|
|
#define MAY_BE_HASH 2
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int hash_offset = 0;
|
|
|
|
unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
|
2020-03-22 19:42:26 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
do {
|
|
|
|
unsigned char *data, *dp;
|
|
|
|
struct dm_buffer *b;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int to_copy;
|
2017-01-04 19:23:53 +00:00
|
|
|
int r;
|
|
|
|
|
|
|
|
r = dm_integrity_failed(ic);
|
|
|
|
if (unlikely(r))
|
|
|
|
return r;
|
|
|
|
|
|
|
|
data = dm_bufio_read(ic->bufio, *metadata_block, &b);
|
2019-02-13 05:46:56 +00:00
|
|
|
if (IS_ERR(data))
|
2017-01-04 19:23:53 +00:00
|
|
|
return PTR_ERR(data);
|
|
|
|
|
|
|
|
to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
|
|
|
|
dp = data + *metadata_offset;
|
|
|
|
if (op == TAG_READ) {
|
|
|
|
memcpy(tag, dp, to_copy);
|
|
|
|
} else if (op == TAG_WRITE) {
|
2021-04-27 15:57:06 +00:00
|
|
|
if (memcmp(dp, tag, to_copy)) {
|
|
|
|
memcpy(dp, tag, to_copy);
|
|
|
|
dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
|
|
|
|
}
|
2020-03-22 19:42:26 +00:00
|
|
|
} else {
|
2017-01-04 19:23:53 +00:00
|
|
|
/* e.g.: op == TAG_CMP */
|
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (likely(is_power_of_2(ic->tag_size))) {
|
|
|
|
if (unlikely(memcmp(dp, tag, to_copy)))
|
|
|
|
if (unlikely(!ic->discard) ||
|
2020-04-03 17:05:50 +00:00
|
|
|
unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
|
2020-03-22 19:42:26 +00:00
|
|
|
goto thorough_test;
|
|
|
|
}
|
|
|
|
} else {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i, ts;
|
2020-03-22 19:42:26 +00:00
|
|
|
thorough_test:
|
|
|
|
ts = total_size;
|
|
|
|
|
|
|
|
for (i = 0; i < to_copy; i++, ts--) {
|
|
|
|
if (unlikely(dp[i] != tag[i]))
|
|
|
|
may_be &= ~MAY_BE_HASH;
|
|
|
|
if (likely(dp[i] != DISCARD_FILLER))
|
|
|
|
may_be &= ~MAY_BE_FILLER;
|
|
|
|
hash_offset++;
|
|
|
|
if (unlikely(hash_offset == ic->tag_size)) {
|
|
|
|
if (unlikely(!may_be)) {
|
|
|
|
dm_bufio_release(b);
|
|
|
|
return ts;
|
|
|
|
}
|
|
|
|
hash_offset = 0;
|
|
|
|
may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
dm_bufio_release(b);
|
|
|
|
|
|
|
|
tag += to_copy;
|
|
|
|
*metadata_offset += to_copy;
|
|
|
|
if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
|
|
|
|
(*metadata_block)++;
|
|
|
|
*metadata_offset = 0;
|
|
|
|
}
|
2020-03-22 19:42:26 +00:00
|
|
|
|
2023-02-02 16:10:52 +00:00
|
|
|
if (unlikely(!is_power_of_2(ic->tag_size)))
|
2020-03-22 19:42:26 +00:00
|
|
|
hash_offset = (hash_offset + to_copy) % ic->tag_size;
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
total_size -= to_copy;
|
|
|
|
} while (unlikely(total_size));
|
|
|
|
|
|
|
|
return 0;
|
2020-03-22 19:42:26 +00:00
|
|
|
#undef MAY_BE_FILLER
|
|
|
|
#undef MAY_BE_HASH
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
2021-01-08 16:15:56 +00:00
|
|
|
struct flush_request {
|
|
|
|
struct dm_io_request io_req;
|
|
|
|
struct dm_io_region io_reg;
|
|
|
|
struct dm_integrity_c *ic;
|
|
|
|
struct completion comp;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void flush_notify(unsigned long error, void *fr_)
|
|
|
|
{
|
|
|
|
struct flush_request *fr = fr_;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2021-01-08 16:15:56 +00:00
|
|
|
if (unlikely(error != 0))
|
2021-01-11 09:25:55 +00:00
|
|
|
dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
|
2021-01-08 16:15:56 +00:00
|
|
|
complete(&fr->comp);
|
|
|
|
}
|
|
|
|
|
|
|
|
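/*
 * Write out dirty metadata buffers. When a separate metadata device is
 * used and flush_data is set, a cache flush is also issued to the data
 * device in parallel and waited for at the end.
 */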
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
|
|
|
int r;
|
2021-01-08 16:15:56 +00:00
|
|
|
struct flush_request fr;
|
|
|
|
|
|
|
|
if (!ic->meta_dev)
|
|
|
|
flush_data = false;
|
|
|
|
if (flush_data) {
|
2022-07-14 18:06:47 +00:00
|
|
|
fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
|
2021-01-08 16:15:56 +00:00
|
|
|
fr.io_req.mem.type = DM_IO_KMEM,
|
|
|
|
fr.io_req.mem.ptr.addr = NULL,
|
|
|
|
fr.io_req.notify.fn = flush_notify,
|
|
|
|
fr.io_req.notify.context = &fr;
|
|
|
|
fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
|
|
|
|
fr.io_reg.bdev = ic->dev->bdev,
|
|
|
|
fr.io_reg.sector = 0,
|
|
|
|
fr.io_reg.count = 0,
|
|
|
|
fr.ic = ic;
|
|
|
|
init_completion(&fr.comp);
|
|
|
|
r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
|
|
|
|
BUG_ON(r);
|
|
|
|
}
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
r = dm_bufio_write_dirty_buffers(ic->bufio);
|
|
|
|
if (unlikely(r))
|
|
|
|
dm_integrity_io_error(ic, "writing tags", r);
|
2021-01-08 16:15:56 +00:00
|
|
|
|
|
|
|
if (flush_data)
|
|
|
|
wait_for_completion(&fr.comp);
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
|
|
|
|
{
|
|
|
|
DECLARE_WAITQUEUE(wait, current);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
__add_wait_queue(&ic->endio_wait, &wait);
|
|
|
|
__set_current_state(TASK_UNINTERRUPTIBLE);
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
io_schedule();
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
__remove_wait_queue(&ic->endio_wait, &wait);
|
|
|
|
}
|
|
|
|
|
2017-10-17 00:01:48 +00:00
|
|
|
static void autocommit_fn(struct timer_list *t)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
2017-10-17 00:01:48 +00:00
|
|
|
struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (likely(!dm_integrity_failed(ic)))
|
|
|
|
queue_work(ic->commit_wq, &ic->commit_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void schedule_autocommit(struct dm_integrity_c *ic)
|
|
|
|
{
|
|
|
|
if (!timer_pending(&ic->autocommit_timer))
|
|
|
|
mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
|
|
|
|
{
|
|
|
|
struct bio *bio;
|
2017-06-19 14:55:47 +00:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&ic->endio_wait.lock, flags);
|
2017-01-04 19:23:53 +00:00
|
|
|
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
|
|
|
|
bio_list_add(&ic->flush_bio_list, bio);
|
2017-06-19 14:55:47 +00:00
|
|
|
spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
queue_work(ic->commit_wq, &ic->commit_work);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
|
|
|
|
{
|
2023-02-01 22:42:29 +00:00
|
|
|
int r;
|
|
|
|
|
|
|
|
r = dm_integrity_failed(ic);
|
2017-06-03 07:38:06 +00:00
|
|
|
if (unlikely(r) && !bio->bi_status)
|
|
|
|
bio->bi_status = errno_to_blk_status(r);
|
2019-04-29 12:57:26 +00:00
|
|
|
if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
|
|
|
|
unsigned long flags;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2019-04-29 12:57:26 +00:00
|
|
|
spin_lock_irqsave(&ic->endio_wait.lock, flags);
|
|
|
|
bio_list_add(&ic->synchronous_bios, bio);
|
|
|
|
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
|
|
|
|
spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
|
|
|
|
return;
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
bio_endio(bio);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
|
|
|
|
{
|
|
|
|
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
|
|
|
|
|
2017-06-03 07:38:06 +00:00
|
|
|
if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
|
2017-01-04 19:23:53 +00:00
|
|
|
submit_flush_bio(ic, dio);
|
|
|
|
else
|
|
|
|
do_endio(ic, bio);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dec_in_flight(struct dm_integrity_io *dio)
|
|
|
|
{
|
|
|
|
if (atomic_dec_and_test(&dio->in_flight)) {
|
|
|
|
struct dm_integrity_c *ic = dio->ic;
|
|
|
|
struct bio *bio;
|
|
|
|
|
|
|
|
remove_range(ic, &dio->range);
|
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
|
2017-01-04 19:23:53 +00:00
|
|
|
schedule_autocommit(ic);
|
|
|
|
|
|
|
|
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
|
2017-06-03 07:38:06 +00:00
|
|
|
if (unlikely(dio->bi_status) && !bio->bi_status)
|
|
|
|
bio->bi_status = dio->bi_status;
|
|
|
|
if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
|
2017-01-04 19:23:53 +00:00
|
|
|
dio->range.logical_sector += dio->range.n_sectors;
|
|
|
|
bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
|
|
|
|
INIT_WORK(&dio->work, integrity_bio_wait);
|
2020-02-17 12:43:03 +00:00
|
|
|
queue_work(ic->offload_wq, &dio->work);
|
2017-01-04 19:23:53 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
do_endio_flush(ic, dio);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void integrity_end_io(struct bio *bio)
|
|
|
|
{
|
|
|
|
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
|
|
|
|
|
2020-02-28 23:11:53 +00:00
|
|
|
dm_bio_restore(&dio->bio_details, bio);
|
|
|
|
if (bio->bi_integrity)
|
2017-01-04 19:23:53 +00:00
|
|
|
bio->bi_opf |= REQ_INTEGRITY;
|
|
|
|
|
|
|
|
if (dio->completion)
|
|
|
|
complete(dio->completion);
|
|
|
|
|
|
|
|
dec_in_flight(dio);
|
|
|
|
}
|
|
|
|
|
|
|
|
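/*
 * Compute the internal-hash tag for one block: hash the superblock salt
 * (if SB_FLAG_FIXED_HMAC is set), the little-endian sector number and the
 * block data, then zero-pad the digest up to tag_size.
 */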
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
|
|
|
|
const char *data, char *result)
|
|
|
|
{
|
2021-05-11 15:41:00 +00:00
|
|
|
__le64 sector_le = cpu_to_le64(sector);
|
2017-01-04 19:23:53 +00:00
|
|
|
SHASH_DESC_ON_STACK(req, ic->internal_hash);
|
|
|
|
int r;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int digest_size;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
req->tfm = ic->internal_hash;
|
|
|
|
|
|
|
|
r = crypto_shash_init(req);
|
|
|
|
if (unlikely(r < 0)) {
|
|
|
|
dm_integrity_io_error(ic, "crypto_shash_init", r);
|
|
|
|
goto failed;
|
|
|
|
}
|
|
|
|
|
2021-01-21 15:09:32 +00:00
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
|
|
|
|
r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
|
|
|
|
if (unlikely(r < 0)) {
|
|
|
|
dm_integrity_io_error(ic, "crypto_shash_update", r);
|
|
|
|
goto failed;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-07 21:16:53 +00:00
|
|
|
r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof(sector_le));
|
2017-01-04 19:23:53 +00:00
|
|
|
if (unlikely(r < 0)) {
|
|
|
|
dm_integrity_io_error(ic, "crypto_shash_update", r);
|
|
|
|
goto failed;
|
|
|
|
}
|
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
|
2017-01-04 19:23:53 +00:00
|
|
|
if (unlikely(r < 0)) {
|
|
|
|
dm_integrity_io_error(ic, "crypto_shash_update", r);
|
|
|
|
goto failed;
|
|
|
|
}
|
|
|
|
|
|
|
|
r = crypto_shash_final(req, result);
|
|
|
|
if (unlikely(r < 0)) {
|
|
|
|
dm_integrity_io_error(ic, "crypto_shash_final", r);
|
|
|
|
goto failed;
|
|
|
|
}
|
|
|
|
|
|
|
|
digest_size = crypto_shash_digestsize(ic->internal_hash);
|
|
|
|
if (unlikely(digest_size < ic->tag_size))
|
|
|
|
memset(result + digest_size, 0, ic->tag_size - digest_size);
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
failed:
|
|
|
|
/* this shouldn't happen anyway, the hash functions have no reason to fail */
|
|
|
|
get_random_bytes(result, ic->tag_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void integrity_metadata(struct work_struct *w)
|
|
|
|
{
|
|
|
|
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
|
|
|
|
struct dm_integrity_c *ic = dio->ic;
|
|
|
|
|
|
|
|
int r;
|
|
|
|
|
|
|
|
if (ic->internal_hash) {
|
|
|
|
struct bvec_iter iter;
|
|
|
|
struct bio_vec bv;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
|
2017-01-04 19:23:53 +00:00
|
|
|
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
|
|
|
|
char *checksums;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
|
2023-02-07 21:22:08 +00:00
|
|
|
char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
|
2020-03-22 19:42:26 +00:00
|
|
|
sector_t sector;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int sectors_to_process;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2017-03-17 16:40:51 +00:00
|
|
|
if (unlikely(ic->mode == 'R'))
|
|
|
|
goto skip_io;
|
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (likely(dio->op != REQ_OP_DISCARD))
|
|
|
|
checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
|
|
|
|
GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
|
|
|
|
else
|
|
|
|
checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
|
2018-08-07 21:18:39 +00:00
|
|
|
if (!checksums) {
|
2017-01-04 19:23:53 +00:00
|
|
|
checksums = checksums_onstack;
|
2018-08-07 21:18:39 +00:00
|
|
|
if (WARN_ON(extra_space &&
|
|
|
|
digest_size > sizeof(checksums_onstack))) {
|
|
|
|
r = -EINVAL;
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (unlikely(dio->op == REQ_OP_DISCARD)) {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
|
|
|
|
unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
|
|
|
|
unsigned int max_blocks = max_size / ic->tag_size;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
memset(checksums, DISCARD_FILLER, max_size);
|
|
|
|
|
|
|
|
while (bi_size) {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
this_step_blocks = min(this_step_blocks, max_blocks);
|
|
|
|
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
|
|
|
|
this_step_blocks * ic->tag_size, TAG_WRITE);
|
|
|
|
if (unlikely(r)) {
|
|
|
|
if (likely(checksums != checksums_onstack))
|
|
|
|
kfree(checksums);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (likely(checksums != checksums_onstack))
|
|
|
|
kfree(checksums);
|
|
|
|
goto skip_io;
|
|
|
|
}
|
|
|
|
|
|
|
|
sector = dio->range.logical_sector;
|
|
|
|
sectors_to_process = dio->range.n_sectors;
|
|
|
|
|
2020-02-28 23:11:53 +00:00
|
|
|
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int pos;
|
2017-01-04 19:23:53 +00:00
|
|
|
char *mem, *checksums_ptr;
|
|
|
|
|
|
|
|
again:
|
2021-10-19 13:44:03 +00:00
|
|
|
mem = bvec_kmap_local(&bv);
|
2017-01-04 19:23:53 +00:00
|
|
|
pos = 0;
|
|
|
|
checksums_ptr = checksums;
|
|
|
|
do {
|
|
|
|
integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
|
|
|
|
checksums_ptr += ic->tag_size;
|
2017-04-18 20:51:52 +00:00
|
|
|
sectors_to_process -= ic->sectors_per_block;
|
|
|
|
pos += ic->sectors_per_block << SECTOR_SHIFT;
|
|
|
|
sector += ic->sectors_per_block;
|
2017-01-04 19:23:53 +00:00
|
|
|
} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
|
2021-10-19 13:44:03 +00:00
|
|
|
kunmap_local(mem);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
|
2020-03-22 19:42:26 +00:00
|
|
|
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
|
2017-01-04 19:23:53 +00:00
|
|
|
if (unlikely(r)) {
|
|
|
|
if (r > 0) {
|
2021-09-04 09:59:29 +00:00
|
|
|
sector_t s;
|
|
|
|
|
|
|
|
s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
|
2022-03-04 18:01:00 +00:00
|
|
|
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
|
|
|
|
bio->bi_bdev, s);
|
2017-01-04 19:23:53 +00:00
|
|
|
r = -EILSEQ;
|
2017-07-21 16:00:00 +00:00
|
|
|
atomic64_inc(&ic->number_of_mismatches);
|
2021-09-04 09:59:29 +00:00
|
|
|
dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
|
|
|
|
bio, s, 0);
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
if (likely(checksums != checksums_onstack))
|
|
|
|
kfree(checksums);
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!sectors_to_process)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (unlikely(pos < bv.bv_len)) {
|
|
|
|
bv.bv_offset += pos;
|
|
|
|
bv.bv_len -= pos;
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (likely(checksums != checksums_onstack))
|
|
|
|
kfree(checksums);
|
|
|
|
} else {
|
2020-02-28 23:11:53 +00:00
|
|
|
struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (bip) {
|
|
|
|
struct bio_vec biv;
|
|
|
|
struct bvec_iter iter;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int data_to_process = dio->range.n_sectors;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
sector_to_block(ic, data_to_process);
|
|
|
|
data_to_process *= ic->tag_size;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
bip_for_each_vec(biv, bip, iter) {
|
|
|
|
unsigned char *tag;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int this_len;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
BUG_ON(PageHighMem(biv.bv_page));
|
2021-08-04 09:56:24 +00:00
|
|
|
tag = bvec_virt(&biv);
|
2017-01-04 19:23:53 +00:00
|
|
|
this_len = min(biv.bv_len, data_to_process);
|
|
|
|
r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
|
2020-03-22 19:42:26 +00:00
|
|
|
this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
|
2017-01-04 19:23:53 +00:00
|
|
|
if (unlikely(r))
|
|
|
|
goto error;
|
|
|
|
data_to_process -= this_len;
|
|
|
|
if (!data_to_process)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-03-17 16:40:51 +00:00
|
|
|
skip_io:
|
2017-01-04 19:23:53 +00:00
|
|
|
dec_in_flight(dio);
|
|
|
|
return;
|
|
|
|
error:
|
2017-06-03 07:38:06 +00:00
|
|
|
dio->bi_status = errno_to_blk_status(r);
|
2017-01-04 19:23:53 +00:00
|
|
|
dec_in_flight(dio);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = ti->private;
|
|
|
|
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
|
2017-04-18 20:51:52 +00:00
|
|
|
struct bio_integrity_payload *bip;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
sector_t area, offset;
|
|
|
|
|
|
|
|
dio->ic = ic;
|
2017-06-03 07:38:06 +00:00
|
|
|
dio->bi_status = 0;
|
2020-03-22 19:42:26 +00:00
|
|
|
dio->op = bio_op(bio);
|
|
|
|
|
|
|
|
if (unlikely(dio->op == REQ_OP_DISCARD)) {
|
|
|
|
if (ti->max_io_len) {
|
|
|
|
sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int log2_max_io_len = __fls(ti->max_io_len);
|
2020-03-22 19:42:26 +00:00
|
|
|
sector_t start_boundary = sec >> log2_max_io_len;
|
|
|
|
sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (start_boundary < end_boundary) {
|
|
|
|
sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
dm_accept_partial_bio(bio, len);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
|
|
|
|
submit_flush_bio(ic, dio);
|
|
|
|
return DM_MAPIO_SUBMITTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
|
2020-03-22 19:42:26 +00:00
|
|
|
dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
|
2017-01-04 19:23:53 +00:00
|
|
|
if (unlikely(dio->fua)) {
|
|
|
|
/*
|
|
|
|
* Don't pass down the FUA flag because we have to flush
|
|
|
|
* disk cache anyway.
|
|
|
|
*/
|
|
|
|
bio->bi_opf &= ~REQ_FUA;
|
|
|
|
}
|
|
|
|
if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
|
|
|
|
DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
|
2020-03-22 19:42:22 +00:00
|
|
|
dio->range.logical_sector, bio_sectors(bio),
|
|
|
|
ic->provided_data_sectors);
|
2017-06-03 07:38:02 +00:00
|
|
|
return DM_MAPIO_KILL;
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
2023-01-25 20:14:58 +00:00
|
|
|
if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
|
2017-04-18 20:51:52 +00:00
|
|
|
DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
|
|
|
|
ic->sectors_per_block,
|
2020-03-22 19:42:22 +00:00
|
|
|
dio->range.logical_sector, bio_sectors(bio));
|
2017-06-03 07:38:02 +00:00
|
|
|
return DM_MAPIO_KILL;
|
2017-04-18 20:51:52 +00:00
|
|
|
}
|
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
|
2017-04-18 20:51:52 +00:00
|
|
|
struct bvec_iter iter;
|
|
|
|
struct bio_vec bv;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
bio_for_each_segment(bv, bio, iter) {
|
2017-11-07 15:40:40 +00:00
|
|
|
if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
|
2017-04-18 20:51:52 +00:00
|
|
|
DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
|
|
|
|
bv.bv_offset, bv.bv_len, ic->sectors_per_block);
|
2017-06-03 07:38:02 +00:00
|
|
|
return DM_MAPIO_KILL;
|
2017-04-18 20:51:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bip = bio_integrity(bio);
|
|
|
|
if (!ic->internal_hash) {
|
|
|
|
if (bip) {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
if (ic->log2_tag_size >= 0)
|
|
|
|
wanted_tag_size <<= ic->log2_tag_size;
|
|
|
|
else
|
|
|
|
wanted_tag_size *= ic->tag_size;
|
|
|
|
if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
|
2019-05-09 19:25:49 +00:00
|
|
|
DMERR("Invalid integrity data size %u, expected %u",
|
|
|
|
bip->bip_iter.bi_size, wanted_tag_size);
|
2017-06-03 07:38:02 +00:00
|
|
|
return DM_MAPIO_KILL;
|
2017-04-18 20:51:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (unlikely(bip != NULL)) {
|
|
|
|
DMERR("Unexpected integrity data when using internal hash");
|
2017-06-03 07:38:02 +00:00
|
|
|
return DM_MAPIO_KILL;
|
2017-04-18 20:51:52 +00:00
|
|
|
}
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
|
2017-06-03 07:38:02 +00:00
|
|
|
return DM_MAPIO_KILL;
|
2017-03-17 16:40:51 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
|
|
|
|
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
|
|
|
|
bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
|
|
|
|
|
|
|
|
dm_integrity_map_continue(dio, true);
|
|
|
|
return DM_MAPIO_SUBMITTED;
|
|
|
|
}
|
|
|
|
|
|
|
|
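/*
 * Copy bio data to (write) or from (read) the in-memory journal, starting
 * at the given journal section and entry. Returns true if the bio was not
 * fully processed and the caller must take the lock again and continue
 * with the remainder.
 */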
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int journal_section, unsigned int journal_entry)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = dio->ic;
|
|
|
|
sector_t logical_sector;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int n_sectors;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
logical_sector = dio->range.logical_sector;
|
|
|
|
n_sectors = dio->range.n_sectors;
|
|
|
|
do {
|
|
|
|
struct bio_vec bv = bio_iovec(bio);
|
|
|
|
char *mem;
|
|
|
|
|
|
|
|
if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
|
|
|
|
bv.bv_len = n_sectors << SECTOR_SHIFT;
|
|
|
|
n_sectors -= bv.bv_len >> SECTOR_SHIFT;
|
|
|
|
bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
|
|
|
|
retry_kmap:
|
2021-12-15 17:31:51 +00:00
|
|
|
mem = kmap_local_page(bv.bv_page);
|
2020-03-22 19:42:26 +00:00
|
|
|
if (likely(dio->op == REQ_OP_WRITE))
|
2017-01-04 19:23:53 +00:00
|
|
|
flush_dcache_page(bv.bv_page);
|
|
|
|
|
|
|
|
do {
|
|
|
|
struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
|
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (unlikely(dio->op == REQ_OP_READ)) {
|
2017-01-04 19:23:53 +00:00
|
|
|
struct journal_sector *js;
|
2017-04-18 20:51:52 +00:00
|
|
|
char *mem_ptr;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int s;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (unlikely(journal_entry_is_inprogress(je))) {
|
|
|
|
flush_dcache_page(bv.bv_page);
|
2021-10-19 13:44:04 +00:00
|
|
|
kunmap_local(mem);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
|
|
|
|
goto retry_kmap;
|
|
|
|
}
|
|
|
|
smp_rmb();
|
|
|
|
BUG_ON(journal_entry_get_sector(je) != logical_sector);
|
|
|
|
js = access_journal_data(ic, journal_section, journal_entry);
|
2017-04-18 20:51:52 +00:00
|
|
|
mem_ptr = mem + bv.bv_offset;
|
|
|
|
s = 0;
|
|
|
|
do {
|
|
|
|
memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
|
|
|
|
*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
|
|
|
|
js++;
|
|
|
|
mem_ptr += 1 << SECTOR_SHIFT;
|
|
|
|
} while (++s < ic->sectors_per_block);
|
2017-01-04 19:23:53 +00:00
|
|
|
#ifdef INTERNAL_VERIFY
|
|
|
|
if (ic->internal_hash) {
|
2023-02-07 21:22:08 +00:00
|
|
|
char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
|
2017-04-18 20:51:52 +00:00
|
|
|
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
|
2019-03-06 13:29:34 +00:00
|
|
|
DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
|
2020-03-22 19:42:22 +00:00
|
|
|
logical_sector);
|
2021-09-04 09:59:29 +00:00
|
|
|
dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
|
|
|
|
bio, logical_sector, 0);
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!ic->internal_hash) {
|
|
|
|
struct bio_integrity_payload *bip = bio_integrity(bio);
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int tag_todo = ic->tag_size;
|
2017-04-18 20:51:52 +00:00
|
|
|
char *tag_ptr = journal_entry_tag(ic, je);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2023-01-30 21:13:54 +00:00
|
|
|
if (bip) {
|
|
|
|
do {
|
|
|
|
struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
|
|
|
|
unsigned int tag_now = min(biv.bv_len, tag_todo);
|
|
|
|
char *tag_addr;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2023-01-30 21:13:54 +00:00
|
|
|
BUG_ON(PageHighMem(biv.bv_page));
|
|
|
|
tag_addr = bvec_virt(&biv);
|
|
|
|
if (likely(dio->op == REQ_OP_WRITE))
|
|
|
|
memcpy(tag_ptr, tag_addr, tag_now);
|
|
|
|
else
|
|
|
|
memcpy(tag_addr, tag_ptr, tag_now);
|
|
|
|
bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
|
|
|
|
tag_ptr += tag_now;
|
|
|
|
tag_todo -= tag_now;
|
|
|
|
} while (unlikely(tag_todo));
|
|
|
|
} else if (likely(dio->op == REQ_OP_WRITE))
|
|
|
|
memset(tag_ptr, 0, tag_todo);
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (likely(dio->op == REQ_OP_WRITE)) {
|
2017-01-04 19:23:53 +00:00
|
|
|
struct journal_sector *js;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int s;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
js = access_journal_data(ic, journal_section, journal_entry);
|
2017-04-18 20:51:52 +00:00
|
|
|
memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
|
|
|
|
|
|
|
|
s = 0;
|
|
|
|
do {
|
|
|
|
je->last_bytes[s] = js[s].commit_id;
|
|
|
|
} while (++s < ic->sectors_per_block);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (ic->internal_hash) {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
if (unlikely(digest_size > ic->tag_size)) {
|
2018-08-07 21:18:39 +00:00
|
|
|
char checksums_onstack[HASH_MAX_DIGESTSIZE];
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
|
2017-04-18 20:51:52 +00:00
|
|
|
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
|
2017-01-04 19:23:53 +00:00
|
|
|
} else
|
2017-04-18 20:51:52 +00:00
|
|
|
integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
journal_entry_set_sector(je, logical_sector);
|
|
|
|
}
|
2017-04-18 20:51:52 +00:00
|
|
|
logical_sector += ic->sectors_per_block;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
journal_entry++;
|
|
|
|
if (unlikely(journal_entry == ic->journal_section_entries)) {
|
|
|
|
journal_entry = 0;
|
|
|
|
journal_section++;
|
|
|
|
wraparound_section(ic, &journal_section);
|
|
|
|
}
|
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
|
|
|
|
} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (unlikely(dio->op == REQ_OP_READ))
|
2017-01-04 19:23:53 +00:00
|
|
|
flush_dcache_page(bv.bv_page);
|
2021-10-19 13:44:04 +00:00
|
|
|
kunmap_local(mem);
|
2017-01-04 19:23:53 +00:00
|
|
|
} while (n_sectors);
|
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (likely(dio->op == REQ_OP_WRITE)) {
|
2017-01-04 19:23:53 +00:00
|
|
|
smp_mb();
|
|
|
|
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
|
|
|
|
wake_up(&ic->copy_to_journal_wait);
|
2023-02-02 16:10:52 +00:00
|
|
|
if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
|
2017-01-04 19:23:53 +00:00
|
|
|
queue_work(ic->commit_wq, &ic->commit_work);
|
2023-02-02 16:10:52 +00:00
|
|
|
else
|
2017-01-04 19:23:53 +00:00
|
|
|
schedule_autocommit(ic);
|
2023-02-02 16:10:52 +00:00
|
|
|
} else
|
2017-01-04 19:23:53 +00:00
|
|
|
remove_range(ic, &dio->range);
|
|
|
|
|
|
|
|
if (unlikely(bio->bi_iter.bi_size)) {
|
|
|
|
sector_t area, offset;
|
|
|
|
|
|
|
|
dio->range.logical_sector = logical_sector;
|
|
|
|
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
|
|
|
|
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = dio->ic;
|
|
|
|
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int journal_section, journal_entry;
|
|
|
|
unsigned int journal_read_pos;
|
2017-01-04 19:23:53 +00:00
|
|
|
struct completion read_comp;
|
2020-03-22 19:42:27 +00:00
|
|
|
bool discard_retried = false;
|
2020-03-22 19:42:26 +00:00
|
|
|
bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
|
|
|
|
need_sync_io = true;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (need_sync_io && from_map) {
|
|
|
|
INIT_WORK(&dio->work, integrity_bio_wait);
|
2020-02-17 12:43:03 +00:00
|
|
|
queue_work(ic->offload_wq, &dio->work);
|
2017-01-04 19:23:53 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
lock_retry:
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
retry:
|
|
|
|
if (unlikely(dm_integrity_failed(ic))) {
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
do_endio(ic, bio);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
dio->range.n_sectors = bio_sectors(bio);
|
|
|
|
journal_read_pos = NOT_FOUND;
|
2020-03-22 19:42:26 +00:00
|
|
|
if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
|
|
|
|
if (dio->op == REQ_OP_WRITE) {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int next_entry, i, pos;
|
|
|
|
unsigned int ws, we, range_sectors;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2017-07-19 15:23:40 +00:00
|
|
|
dio->range.n_sectors = min(dio->range.n_sectors,
|
2019-04-29 12:57:21 +00:00
|
|
|
(sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
|
2018-07-03 18:13:26 +00:00
|
|
|
if (unlikely(!dio->range.n_sectors)) {
|
|
|
|
if (from_map)
|
|
|
|
goto offload_to_thread;
|
|
|
|
sleep_on_endio_wait(ic);
|
|
|
|
goto retry;
|
|
|
|
}
|
2017-07-19 15:23:40 +00:00
|
|
|
range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
|
|
|
|
ic->free_sectors -= range_sectors;
|
2017-01-04 19:23:53 +00:00
|
|
|
journal_section = ic->free_section;
|
|
|
|
journal_entry = ic->free_section_entry;
|
|
|
|
|
2017-07-19 15:23:40 +00:00
|
|
|
next_entry = ic->free_section_entry + range_sectors;
|
2017-01-04 19:23:53 +00:00
|
|
|
ic->free_section_entry = next_entry % ic->journal_section_entries;
|
|
|
|
ic->free_section += next_entry / ic->journal_section_entries;
|
|
|
|
ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
|
|
|
|
wraparound_section(ic, &ic->free_section);
|
|
|
|
|
|
|
|
pos = journal_section * ic->journal_section_entries + journal_entry;
|
|
|
|
ws = journal_section;
|
|
|
|
we = journal_entry;
|
2017-04-18 20:51:52 +00:00
|
|
|
i = 0;
|
|
|
|
do {
|
2017-01-04 19:23:53 +00:00
|
|
|
struct journal_entry *je;
|
|
|
|
|
|
|
|
add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
|
|
|
|
pos++;
|
|
|
|
if (unlikely(pos >= ic->journal_entries))
|
|
|
|
pos = 0;
|
|
|
|
|
|
|
|
je = access_journal_entry(ic, ws, we);
|
|
|
|
BUG_ON(!journal_entry_is_unused(je));
|
|
|
|
journal_entry_set_inprogress(je);
|
|
|
|
we++;
|
|
|
|
if (unlikely(we == ic->journal_section_entries)) {
|
|
|
|
we = 0;
|
|
|
|
ws++;
|
|
|
|
wraparound_section(ic, &ws);
|
|
|
|
}
|
2017-04-18 20:51:52 +00:00
|
|
|
} while ((i += ic->sectors_per_block) < dio->range.n_sectors);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
goto journal_read_write;
|
|
|
|
} else {
|
|
|
|
sector_t next_sector;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
|
|
|
|
if (likely(journal_read_pos == NOT_FOUND)) {
|
|
|
|
if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
|
|
|
|
dio->range.n_sectors = next_sector - dio->range.logical_sector;
|
|
|
|
} else {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i;
|
|
|
|
unsigned int jp = journal_read_pos + 1;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
|
|
|
|
if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
|
2017-01-04 19:23:53 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
dio->range.n_sectors = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-07-03 18:13:27 +00:00
|
|
|
if (unlikely(!add_new_range(ic, &dio->range, true))) {
|
2017-01-04 19:23:53 +00:00
|
|
|
/*
|
|
|
|
* We must not sleep in the request routine because it could
|
|
|
|
* stall bios on current->bio_list.
|
|
|
|
* So, we offload the bio to a workqueue if we have to sleep.
|
|
|
|
*/
|
|
|
|
if (from_map) {
|
2018-07-03 18:13:26 +00:00
|
|
|
offload_to_thread:
|
2017-01-04 19:23:53 +00:00
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
INIT_WORK(&dio->work, integrity_bio_wait);
|
|
|
|
queue_work(ic->wait_wq, &dio->work);
|
|
|
|
return;
|
|
|
|
}
|
2019-08-10 16:30:27 +00:00
|
|
|
if (journal_read_pos != NOT_FOUND)
|
|
|
|
dio->range.n_sectors = ic->sectors_per_block;
|
2018-07-03 18:13:27 +00:00
|
|
|
wait_and_add_new_range(ic, &dio->range);
|
2019-08-10 16:30:27 +00:00
|
|
|
/*
|
|
|
|
* wait_and_add_new_range drops the spinlock, so the journal
|
|
|
|
* may have been changed arbitrarily. We need to recheck.
|
|
|
|
* To simplify the code, we restrict I/O size to just one block.
|
|
|
|
*/
|
|
|
|
if (journal_read_pos != NOT_FOUND) {
|
|
|
|
sector_t next_sector;
|
2023-02-01 22:42:29 +00:00
|
|
|
unsigned int new_pos;
|
|
|
|
|
|
|
|
new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
|
2019-08-10 16:30:27 +00:00
|
|
|
if (unlikely(new_pos != journal_read_pos)) {
|
|
|
|
remove_range_unlocked(ic, &dio->range);
|
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
2020-03-22 19:42:27 +00:00
|
|
|
if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
|
|
|
|
sector_t next_sector;
|
2023-02-01 22:42:29 +00:00
|
|
|
unsigned int new_pos;
|
|
|
|
|
|
|
|
new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
|
2020-03-22 19:42:27 +00:00
|
|
|
if (unlikely(new_pos != NOT_FOUND) ||
|
|
|
|
unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
|
|
|
|
remove_range_unlocked(ic, &dio->range);
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
queue_work(ic->commit_wq, &ic->commit_work);
|
|
|
|
flush_workqueue(ic->commit_wq);
|
|
|
|
queue_work(ic->writer_wq, &ic->writer_work);
|
|
|
|
flush_workqueue(ic->writer_wq);
|
|
|
|
discard_retried = true;
|
|
|
|
goto lock_retry;
|
|
|
|
}
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
|
|
|
|
if (unlikely(journal_read_pos != NOT_FOUND)) {
|
|
|
|
journal_section = journal_read_pos / ic->journal_section_entries;
|
|
|
|
journal_entry = journal_read_pos % ic->journal_section_entries;
|
|
|
|
goto journal_read_write;
|
|
|
|
}
|
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
|
2019-05-09 19:25:49 +00:00
|
|
|
if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
|
|
|
|
dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
|
|
|
|
struct bitmap_block_status *bbs;
|
2019-04-29 12:57:24 +00:00
|
|
|
|
2019-05-09 19:25:49 +00:00
|
|
|
bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
|
2019-04-29 12:57:24 +00:00
|
|
|
spin_lock(&bbs->bio_queue_lock);
|
|
|
|
bio_list_add(&bbs->bio_queue, bio);
|
|
|
|
spin_unlock(&bbs->bio_queue_lock);
|
|
|
|
queue_work(ic->writer_wq, &bbs->work);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
dio->in_flight = (atomic_t)ATOMIC_INIT(2);
|
|
|
|
|
|
|
|
if (need_sync_io) {
|
2017-08-15 15:11:59 +00:00
|
|
|
init_completion(&read_comp);
|
2017-01-04 19:23:53 +00:00
|
|
|
dio->completion = &read_comp;
|
|
|
|
} else
|
|
|
|
dio->completion = NULL;
|
|
|
|
|
2020-02-28 23:11:53 +00:00
|
|
|
dm_bio_record(&dio->bio_details, bio);
|
2017-08-23 17:10:32 +00:00
|
|
|
bio_set_dev(bio, ic->dev->bdev);
|
2017-01-04 19:23:53 +00:00
|
|
|
bio->bi_integrity = NULL;
|
|
|
|
bio->bi_opf &= ~REQ_INTEGRITY;
|
|
|
|
bio->bi_end_io = integrity_end_io;
|
|
|
|
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
|
2020-02-28 23:11:53 +00:00
|
|
|
|
2020-03-22 19:42:26 +00:00
|
|
|
if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
|
|
|
|
integrity_metadata(&dio->work);
|
2021-01-08 16:15:56 +00:00
|
|
|
dm_integrity_flush_buffers(ic, false);
|
2020-03-22 19:42:26 +00:00
|
|
|
|
|
|
|
dio->in_flight = (atomic_t)ATOMIC_INIT(1);
|
|
|
|
dio->completion = NULL;
|
|
|
|
|
2020-07-01 08:59:44 +00:00
|
|
|
submit_bio_noacct(bio);
|
2020-03-22 19:42:26 +00:00
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2020-07-01 08:59:44 +00:00
|
|
|
submit_bio_noacct(bio);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (need_sync_io) {
|
|
|
|
wait_for_completion_io(&read_comp);
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
|
2018-07-03 18:13:33 +00:00
|
|
|
dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
|
|
|
|
goto skip_check;
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->mode == 'B') {
|
2019-05-09 19:25:49 +00:00
|
|
|
if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
|
|
|
|
dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
|
2019-04-29 12:57:24 +00:00
|
|
|
goto skip_check;
|
|
|
|
}
|
|
|
|
|
2017-07-31 07:22:20 +00:00
|
|
|
if (likely(!bio->bi_status))
|
|
|
|
integrity_metadata(&dio->work);
|
|
|
|
else
|
2018-07-03 18:13:33 +00:00
|
|
|
skip_check:
|
2017-07-31 07:22:20 +00:00
|
|
|
dec_in_flight(dio);
|
2017-01-04 19:23:53 +00:00
|
|
|
} else {
|
|
|
|
INIT_WORK(&dio->work, integrity_metadata);
|
|
|
|
queue_work(ic->metadata_wq, &dio->work);
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
journal_read_write:
|
|
|
|
if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
|
|
|
|
goto lock_retry;
|
|
|
|
|
|
|
|
do_endio_flush(ic, dio);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void integrity_bio_wait(struct work_struct *w)
|
|
|
|
{
|
|
|
|
struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
|
|
|
|
|
|
|
|
dm_integrity_map_continue(dio, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
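/*
 * If the current journal section is only partially filled, skip the unused
 * entries and advance to the next section boundary so that whole sections
 * are committed; the skipped entries are subtracted from free_sectors.
 */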
static void pad_uncommitted(struct dm_integrity_c *ic)
|
|
|
|
{
|
|
|
|
if (ic->free_section_entry) {
|
|
|
|
ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
|
|
|
|
ic->free_section_entry = 0;
|
|
|
|
ic->free_section++;
|
|
|
|
wraparound_section(ic, &ic->free_section);
|
|
|
|
ic->n_uncommitted_sections++;
|
|
|
|
}
|
2019-04-29 12:57:24 +00:00
|
|
|
if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
|
2019-05-09 19:25:49 +00:00
|
|
|
(ic->n_uncommitted_sections + ic->n_committed_sections) *
|
|
|
|
ic->journal_section_entries + ic->free_sectors)) {
|
|
|
|
DMCRIT("journal_sections %u, journal_section_entries %u, "
|
|
|
|
"n_uncommitted_sections %u, n_committed_sections %u, "
|
|
|
|
"journal_section_entries %u, free_sectors %u",
|
|
|
|
ic->journal_sections, ic->journal_section_entries,
|
|
|
|
ic->n_uncommitted_sections, ic->n_committed_sections,
|
|
|
|
ic->journal_section_entries, ic->free_sectors);
|
2019-04-29 12:57:24 +00:00
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void integrity_commit(struct work_struct *w)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int commit_start, commit_sections;
|
|
|
|
unsigned int i, j, n;
|
2017-01-04 19:23:53 +00:00
|
|
|
struct bio *flushes;
|
|
|
|
|
|
|
|
del_timer(&ic->autocommit_timer);
|
|
|
|
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
flushes = bio_list_get(&ic->flush_bio_list);
|
|
|
|
if (unlikely(ic->mode != 'J')) {
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
2021-01-08 16:15:56 +00:00
|
|
|
dm_integrity_flush_buffers(ic, true);
|
2017-01-04 19:23:53 +00:00
|
|
|
goto release_flush_bios;
|
|
|
|
}
|
|
|
|
|
|
|
|
pad_uncommitted(ic);
|
|
|
|
commit_start = ic->uncommitted_section;
|
|
|
|
commit_sections = ic->n_uncommitted_sections;
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
|
|
|
|
if (!commit_sections)
|
|
|
|
goto release_flush_bios;
|
|
|
|
|
2022-11-15 17:51:50 +00:00
|
|
|
ic->wrote_to_journal = true;
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
i = commit_start;
|
|
|
|
for (n = 0; n < commit_sections; n++) {
|
|
|
|
for (j = 0; j < ic->journal_section_entries; j++) {
|
|
|
|
struct journal_entry *je;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
je = access_journal_entry(ic, i, j);
|
|
|
|
io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
|
|
|
|
}
|
|
|
|
for (j = 0; j < ic->journal_section_sectors; j++) {
|
|
|
|
struct journal_sector *js;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
js = access_journal(ic, i, j);
|
|
|
|
js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
|
|
|
|
}
|
|
|
|
i++;
|
|
|
|
if (unlikely(i >= ic->journal_sections))
|
|
|
|
ic->commit_seq = next_commit_seq(ic->commit_seq);
|
|
|
|
wraparound_section(ic, &i);
|
|
|
|
}
|
|
|
|
smp_rmb();
|
|
|
|
|
|
|
|
write_journal(ic, commit_start, commit_sections);
|
|
|
|
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
ic->uncommitted_section += commit_sections;
|
|
|
|
wraparound_section(ic, &ic->uncommitted_section);
|
|
|
|
ic->n_uncommitted_sections -= commit_sections;
|
|
|
|
ic->n_committed_sections += commit_sections;
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
|
2017-10-23 21:07:11 +00:00
|
|
|
if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
|
2017-01-04 19:23:53 +00:00
|
|
|
queue_work(ic->writer_wq, &ic->writer_work);
|
|
|
|
|
|
|
|
release_flush_bios:
|
|
|
|
while (flushes) {
|
|
|
|
struct bio *next = flushes->bi_next;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
flushes->bi_next = NULL;
|
|
|
|
do_endio(ic, flushes);
|
|
|
|
flushes = next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void complete_copy_from_journal(unsigned long error, void *context)
|
|
|
|
{
|
|
|
|
struct journal_io *io = context;
|
|
|
|
struct journal_completion *comp = io->comp;
|
|
|
|
struct dm_integrity_c *ic = comp->ic;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
remove_range(ic, &io->range);
|
2018-05-20 22:25:53 +00:00
|
|
|
mempool_free(io, &ic->journal_io_mempool);
|
2017-01-04 19:23:53 +00:00
|
|
|
if (unlikely(error != 0))
|
|
|
|
dm_integrity_io_error(ic, "copying from journal", -EIO);
|
|
|
|
complete_journal_op(comp);
|
|
|
|
}
|
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
|
|
|
|
struct journal_entry *je)
|
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int s = 0;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
do {
|
|
|
|
js->commit_id = je->last_bytes[s];
|
|
|
|
js++;
|
|
|
|
} while (++s < ic->sectors_per_block);
|
|
|
|
}
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
|
|
|
|
unsigned int write_sections, bool from_replay)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i, j, n;
|
2017-01-04 19:23:53 +00:00
|
|
|
struct journal_completion comp;
|
2017-07-19 15:24:08 +00:00
|
|
|
struct blk_plug plug;
|
|
|
|
|
|
|
|
blk_start_plug(&plug);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
comp.ic = ic;
|
|
|
|
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
|
2017-08-15 15:11:59 +00:00
|
|
|
init_completion(&comp.comp);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
i = write_start;
|
|
|
|
for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
|
|
|
|
#ifndef INTERNAL_VERIFY
|
|
|
|
if (unlikely(from_replay))
|
|
|
|
#endif
|
|
|
|
rw_section_mac(ic, i, false);
|
|
|
|
for (j = 0; j < ic->journal_section_entries; j++) {
|
|
|
|
struct journal_entry *je = access_journal_entry(ic, i, j);
|
|
|
|
sector_t sec, area, offset;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int k, l, next_loop;
|
2017-01-04 19:23:53 +00:00
|
|
|
sector_t metadata_block;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int metadata_offset;
|
2017-01-04 19:23:53 +00:00
|
|
|
struct journal_io *io;
|
|
|
|
|
|
|
|
if (journal_entry_is_unused(je))
|
|
|
|
continue;
|
|
|
|
BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
|
|
|
|
sec = journal_entry_get_sector(je);
|
2017-04-18 20:51:52 +00:00
|
|
|
if (unlikely(from_replay)) {
|
2023-01-25 20:14:58 +00:00
|
|
|
if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
|
2017-04-18 20:51:52 +00:00
|
|
|
dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
|
|
|
|
sec &= ~(sector_t)(ic->sectors_per_block - 1);
|
|
|
|
}
|
2022-03-26 14:24:56 +00:00
|
|
|
if (unlikely(sec >= ic->provided_data_sectors)) {
|
|
|
|
journal_entry_set_unused(je);
|
|
|
|
continue;
|
|
|
|
}
|
2017-04-18 20:51:52 +00:00
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
get_area_and_offset(ic, sec, &area, &offset);
|
2017-04-18 20:51:52 +00:00
|
|
|
restore_last_bytes(ic, access_journal_data(ic, i, j), je);
|
2017-01-04 19:23:53 +00:00
|
|
|
for (k = j + 1; k < ic->journal_section_entries; k++) {
|
|
|
|
struct journal_entry *je2 = access_journal_entry(ic, i, k);
|
|
|
|
sector_t sec2, area2, offset2;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
if (journal_entry_is_unused(je2))
|
|
|
|
break;
|
|
|
|
BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
|
|
|
|
sec2 = journal_entry_get_sector(je2);
|
2020-03-22 19:42:23 +00:00
|
|
|
if (unlikely(sec2 >= ic->provided_data_sectors))
|
|
|
|
break;
|
2017-01-04 19:23:53 +00:00
|
|
|
get_area_and_offset(ic, sec2, &area2, &offset2);
|
2017-04-18 20:51:52 +00:00
|
|
|
if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
|
2017-01-04 19:23:53 +00:00
|
|
|
break;
|
2017-04-18 20:51:52 +00:00
|
|
|
restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
next_loop = k - 1;
|
|
|
|
|
2018-05-20 22:25:53 +00:00
|
|
|
io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
|
2017-01-04 19:23:53 +00:00
|
|
|
io->comp = &comp;
|
|
|
|
io->range.logical_sector = sec;
|
2017-04-18 20:51:52 +00:00
|
|
|
io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
2019-04-29 12:57:22 +00:00
|
|
|
add_new_range_and_wait(ic, &io->range);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (likely(!from_replay)) {
|
|
|
|
struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
|
|
|
|
|
|
|
|
/* don't write if there is newer committed sector */
|
|
|
|
while (j < k && find_newer_committed_node(ic, &section_node[j])) {
|
|
|
|
struct journal_entry *je2 = access_journal_entry(ic, i, j);
|
|
|
|
|
|
|
|
journal_entry_set_unused(je2);
|
|
|
|
remove_journal_node(ic, &section_node[j]);
|
|
|
|
j++;
|
2017-04-18 20:51:52 +00:00
|
|
|
sec += ic->sectors_per_block;
|
|
|
|
offset += ic->sectors_per_block;
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
|
|
|
|
struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
|
|
|
|
|
|
|
|
journal_entry_set_unused(je2);
|
|
|
|
remove_journal_node(ic, &section_node[k - 1]);
|
|
|
|
k--;
|
|
|
|
}
|
|
|
|
if (j == k) {
|
|
|
|
remove_range_unlocked(ic, &io->range);
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
2018-05-20 22:25:53 +00:00
|
|
|
mempool_free(io, &ic->journal_io_mempool);
|
2017-01-04 19:23:53 +00:00
|
|
|
goto skip_io;
|
|
|
|
}
|
2023-02-02 16:10:52 +00:00
|
|
|
for (l = j; l < k; l++)
|
2017-01-04 19:23:53 +00:00
|
|
|
remove_journal_node(ic, &section_node[l]);
|
|
|
|
}
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
|
|
|
|
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
|
|
|
|
for (l = j; l < k; l++) {
|
|
|
|
int r;
|
|
|
|
struct journal_entry *je2 = access_journal_entry(ic, i, l);
|
|
|
|
|
|
|
|
if (
|
|
|
|
#ifndef INTERNAL_VERIFY
|
|
|
|
unlikely(from_replay) &&
|
|
|
|
#endif
|
|
|
|
ic->internal_hash) {
|
2018-08-07 21:18:39 +00:00
|
|
|
char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
|
2017-01-04 19:23:53 +00:00
|
|
|
(char *)access_journal_data(ic, i, l), test_tag);
|
2021-09-04 09:59:29 +00:00
|
|
|
if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
|
2017-01-04 19:23:53 +00:00
|
|
|
dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
|
2021-09-04 09:59:29 +00:00
|
|
|
dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
journal_entry_set_unused(je2);
|
2017-04-18 20:51:52 +00:00
|
|
|
r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
|
2017-01-04 19:23:53 +00:00
|
|
|
ic->tag_size, TAG_WRITE);
|
2023-02-02 16:10:52 +00:00
|
|
|
if (unlikely(r))
|
2017-01-04 19:23:53 +00:00
|
|
|
dm_integrity_io_error(ic, "reading tags", r);
|
|
|
|
}
|
|
|
|
|
|
|
|
atomic_inc(&comp.in_flight);
|
2017-04-18 20:51:52 +00:00
|
|
|
copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
|
|
|
|
(k - j) << ic->sb->log2_sectors_per_block,
|
|
|
|
get_data_sector(ic, area, offset),
|
2017-01-04 19:23:53 +00:00
|
|
|
complete_copy_from_journal, io);
|
|
|
|
skip_io:
|
|
|
|
j = next_loop;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
dm_bufio_write_dirty_buffers_async(ic->bufio);
|
|
|
|
|
2017-07-19 15:24:08 +00:00
|
|
|
blk_finish_plug(&plug);
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
complete_journal_op(&comp);
|
|
|
|
wait_for_completion_io(&comp.comp);
|
|
|
|
|
2021-01-08 16:15:56 +00:00
|
|
|
dm_integrity_flush_buffers(ic, true);
|
2017-01-04 19:23:53 +00:00
|
|
|
}
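/*
 * do_journal_write() walks each committed section and, for every used entry,
 * extends a run [j, k) of entries whose target sectors are physically
 * contiguous within one area, so the data can be copied back with a single
 * copy_from_journal() call and the tags written as one batch.  Below is a
 * minimal userspace sketch (compiled out, not driver code) of just that
 * run-detection arithmetic; the sector list and block size in main() are
 * made-up example values, and unlike the driver it does not also check
 * that the run stays within a single area.
 */
#if 0
#include <stdio.h>

static unsigned run_length(const unsigned long long *sec, unsigned n,
			   unsigned log2_sectors_per_block)
{
	unsigned k;

	for (k = 1; k < n; k++) {
		/* the next entry must continue exactly where the previous one ended */
		if (sec[k] != sec[0] + ((unsigned long long)k << log2_sectors_per_block))
			break;
	}
	return k;
}

int main(void)
{
	/* 4KiB blocks on 512-byte sectors -> log2_sectors_per_block == 3 */
	unsigned long long sec[] = { 1024, 1032, 1040, 2000 };

	/* prints 3: the first three entries are contiguous, 2000 is not */
	printf("%u\n", run_length(sec, 4, 3));
	return 0;
}
#endif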
|
|
|
|
|
|
|
|
static void integrity_writer(struct work_struct *w)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int write_start, write_sections;
|
|
|
|
unsigned int prev_free_sectors;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
write_start = ic->committed_section;
|
|
|
|
write_sections = ic->n_committed_sections;
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
|
|
|
|
if (!write_sections)
|
|
|
|
return;
|
|
|
|
|
|
|
|
do_journal_write(ic, write_start, write_sections, false);
|
|
|
|
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
|
|
|
|
ic->committed_section += write_sections;
|
|
|
|
wraparound_section(ic, &ic->committed_section);
|
|
|
|
ic->n_committed_sections -= write_sections;
|
|
|
|
|
|
|
|
prev_free_sectors = ic->free_sectors;
|
|
|
|
ic->free_sectors += write_sections * ic->journal_section_entries;
|
|
|
|
if (unlikely(!prev_free_sectors))
|
|
|
|
wake_up_locked(&ic->endio_wait);
|
|
|
|
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
}
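/*
 * integrity_writer() is the work item that makes journal space reusable:
 * once the committed sections have been copied in place by
 * do_journal_write(), the window of committed sections is advanced and the
 * freed entries are returned to free_sectors.  Writers that blocked on a
 * full journal are woken only when free_sectors goes from zero to non-zero,
 * which is why prev_free_sectors is sampled before the update.
 */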
|
|
|
|
|
2018-07-03 18:13:33 +00:00
|
|
|
static void recalc_write_super(struct dm_integrity_c *ic)
|
|
|
|
{
|
|
|
|
int r;
|
|
|
|
|
2021-01-08 16:15:56 +00:00
|
|
|
dm_integrity_flush_buffers(ic, false);
|
2018-07-03 18:13:33 +00:00
|
|
|
if (dm_integrity_failed(ic))
|
|
|
|
return;
|
|
|
|
|
2022-07-14 18:06:52 +00:00
|
|
|
r = sync_rw_sb(ic, REQ_OP_WRITE);
|
2018-07-03 18:13:33 +00:00
|
|
|
if (unlikely(r))
|
|
|
|
dm_integrity_io_error(ic, "writing superblock", r);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void integrity_recalc(struct work_struct *w)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
|
2023-06-26 14:46:00 +00:00
|
|
|
size_t recalc_tags_size;
|
|
|
|
u8 *recalc_buffer = NULL;
|
|
|
|
u8 *recalc_tags = NULL;
|
2018-07-03 18:13:33 +00:00
|
|
|
struct dm_integrity_range range;
|
|
|
|
struct dm_io_request io_req;
|
|
|
|
struct dm_io_region io_loc;
|
|
|
|
sector_t area, offset;
|
|
|
|
sector_t metadata_block;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int metadata_offset;
|
2019-04-29 12:57:24 +00:00
|
|
|
sector_t logical_sector, n_sectors;
|
2018-07-03 18:13:33 +00:00
|
|
|
__u8 *t;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i;
|
2018-07-03 18:13:33 +00:00
|
|
|
int r;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int super_counter = 0;
|
2023-06-26 14:46:57 +00:00
|
|
|
unsigned recalc_sectors = RECALC_SECTORS;
|
2018-07-03 18:13:33 +00:00
|
|
|
|
2023-06-26 14:46:57 +00:00
|
|
|
retry:
|
|
|
|
recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
|
2023-06-26 14:46:00 +00:00
|
|
|
if (!recalc_buffer) {
|
2023-06-26 14:46:57 +00:00
|
|
|
oom:
|
|
|
|
recalc_sectors >>= 1;
|
|
|
|
if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
|
|
|
|
goto retry;
|
2023-06-26 14:46:00 +00:00
|
|
|
DMCRIT("out of memory for recalculate buffer - recalculation disabled");
|
|
|
|
goto free_ret;
|
|
|
|
}
|
2023-06-26 14:46:57 +00:00
|
|
|
recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
|
2023-06-26 14:46:00 +00:00
|
|
|
if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
|
|
|
|
recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
|
|
|
|
recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
|
|
|
|
if (!recalc_tags) {
|
2023-06-26 14:46:57 +00:00
|
|
|
vfree(recalc_buffer);
|
2023-07-03 15:12:39 +00:00
|
|
|
recalc_buffer = NULL;
|
2023-06-26 14:46:57 +00:00
|
|
|
goto oom;
|
2023-06-26 14:46:00 +00:00
|
|
|
}
|
2018-07-03 18:13:33 +00:00
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
|
|
|
|
|
2018-07-03 18:13:33 +00:00
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
|
|
|
|
next_chunk:
|
|
|
|
|
2020-07-23 14:42:09 +00:00
|
|
|
if (unlikely(dm_post_suspending(ic->ti)))
|
2018-07-03 18:13:33 +00:00
|
|
|
goto unlock_ret;
|
|
|
|
|
|
|
|
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
|
2019-04-29 12:57:24 +00:00
|
|
|
if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
|
|
|
|
if (ic->mode == 'B') {
|
2020-08-31 13:25:41 +00:00
|
|
|
block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
|
2019-04-29 12:57:24 +00:00
|
|
|
DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
|
|
|
|
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
|
|
|
|
}
|
2018-07-03 18:13:33 +00:00
|
|
|
goto unlock_ret;
|
2019-04-29 12:57:24 +00:00
|
|
|
}
|
2018-07-03 18:13:33 +00:00
|
|
|
|
|
|
|
get_area_and_offset(ic, range.logical_sector, &area, &offset);
|
2023-06-26 14:46:57 +00:00
|
|
|
range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
|
2018-07-03 18:13:33 +00:00
|
|
|
if (!ic->meta_dev)
|
2023-01-25 20:14:58 +00:00
|
|
|
range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
|
2018-07-03 18:13:33 +00:00
|
|
|
|
2019-04-29 12:57:22 +00:00
|
|
|
add_new_range_and_wait(ic, &range);
|
2018-07-03 18:13:33 +00:00
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
2019-04-29 12:57:24 +00:00
|
|
|
logical_sector = range.logical_sector;
|
|
|
|
n_sectors = range.n_sectors;
|
|
|
|
|
|
|
|
if (ic->mode == 'B') {
|
2023-02-02 16:10:52 +00:00
|
|
|
if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
|
2019-04-29 12:57:24 +00:00
|
|
|
goto advance_and_next;
|
2023-02-02 16:10:52 +00:00
|
|
|
|
2019-05-09 19:25:49 +00:00
|
|
|
while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
|
|
|
|
ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
|
2019-04-29 12:57:24 +00:00
|
|
|
logical_sector += ic->sectors_per_block;
|
|
|
|
n_sectors -= ic->sectors_per_block;
|
|
|
|
cond_resched();
|
|
|
|
}
|
2019-05-09 19:25:49 +00:00
|
|
|
while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
|
|
|
|
ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
|
2019-04-29 12:57:24 +00:00
|
|
|
n_sectors -= ic->sectors_per_block;
|
|
|
|
cond_resched();
|
|
|
|
}
|
|
|
|
get_area_and_offset(ic, logical_sector, &area, &offset);
|
|
|
|
}
|
|
|
|
|
2020-03-22 19:42:22 +00:00
|
|
|
DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
|
2018-07-03 18:13:33 +00:00
|
|
|
|
|
|
|
if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
|
|
|
|
recalc_write_super(ic);
|
2023-02-02 16:10:52 +00:00
|
|
|
if (ic->mode == 'B')
|
2019-04-29 12:57:24 +00:00
|
|
|
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
|
2023-02-02 16:10:52 +00:00
|
|
|
|
2018-07-03 18:13:33 +00:00
|
|
|
super_counter = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (unlikely(dm_integrity_failed(ic)))
|
|
|
|
goto err;
|
|
|
|
|
2022-07-14 18:06:47 +00:00
|
|
|
io_req.bi_opf = REQ_OP_READ;
|
2021-05-12 12:28:43 +00:00
|
|
|
io_req.mem.type = DM_IO_VMA;
|
2023-06-26 14:46:00 +00:00
|
|
|
io_req.mem.ptr.addr = recalc_buffer;
|
2021-05-12 12:28:43 +00:00
|
|
|
io_req.notify.fn = NULL;
|
|
|
|
io_req.client = ic->io;
|
|
|
|
io_loc.bdev = ic->dev->bdev;
|
|
|
|
io_loc.sector = get_data_sector(ic, area, offset);
|
|
|
|
io_loc.count = n_sectors;
|
2018-07-03 18:13:33 +00:00
|
|
|
|
2021-05-12 12:28:43 +00:00
|
|
|
r = dm_io(&io_req, 1, &io_loc, NULL);
|
|
|
|
if (unlikely(r)) {
|
|
|
|
dm_integrity_io_error(ic, "reading data", r);
|
|
|
|
goto err;
|
|
|
|
}
|
2018-07-03 18:13:33 +00:00
|
|
|
|
2023-06-26 14:46:00 +00:00
|
|
|
t = recalc_tags;
|
2021-05-12 12:28:43 +00:00
|
|
|
for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
|
2023-06-26 14:46:00 +00:00
|
|
|
integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
|
2021-05-12 12:28:43 +00:00
|
|
|
t += ic->tag_size;
|
2018-07-03 18:13:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
|
|
|
|
|
2023-06-26 14:46:00 +00:00
|
|
|
r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE);
|
2018-07-03 18:13:33 +00:00
|
|
|
if (unlikely(r)) {
|
|
|
|
dm_integrity_io_error(ic, "writing tags", r);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2020-08-31 13:25:41 +00:00
|
|
|
if (ic->mode == 'B') {
|
|
|
|
sector_t start, end;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-08-31 13:25:41 +00:00
|
|
|
start = (range.logical_sector >>
|
|
|
|
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
|
|
|
|
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
|
|
|
|
end = ((range.logical_sector + range.n_sectors) >>
|
|
|
|
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
|
|
|
|
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
|
|
|
|
block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
|
|
|
|
}
|
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
advance_and_next:
|
|
|
|
cond_resched();
|
|
|
|
|
2018-07-03 18:13:33 +00:00
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
remove_range_unlocked(ic, &range);
|
|
|
|
ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
|
|
|
|
goto next_chunk;
|
|
|
|
|
|
|
|
err:
|
|
|
|
remove_range(ic, &range);
|
2023-06-26 14:46:00 +00:00
|
|
|
goto free_ret;
|
2018-07-03 18:13:33 +00:00
|
|
|
|
|
|
|
unlock_ret:
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
|
|
|
|
recalc_write_super(ic);
|
2023-06-26 14:46:00 +00:00
|
|
|
|
|
|
|
free_ret:
|
|
|
|
vfree(recalc_buffer);
|
|
|
|
kvfree(recalc_tags);
|
2018-07-03 18:13:33 +00:00
|
|
|
}
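/*
 * The recalculation above proceeds in chunks of at most recalc_sectors
 * sectors: claim a range, read the data, compute one checksum per block,
 * write the tags, advance sb->recalc_sector, and every RECALC_WRITE_SUPER
 * chunks persist the superblock so progress survives a crash.  In bitmap
 * mode, ranges whose bits are already clear are skipped.  Below is a
 * minimal userspace sketch (compiled out, not driver code) of the
 * tag-buffer sizing used when the buffers are allocated; the numbers in
 * main() are example assumptions.
 */
#if 0
#include <stdio.h>

static unsigned long recalc_tags_bytes(unsigned long recalc_sectors,
				       unsigned log2_sectors_per_block,
				       unsigned tag_size,
				       unsigned digest_size)
{
	unsigned long size = (recalc_sectors >> log2_sectors_per_block) * tag_size;

	/* room for one full digest when it is wider than the stored tag */
	if (digest_size > tag_size)
		size += digest_size - tag_size;
	return size;
}

int main(void)
{
	/* 2048 sectors per pass, 4KiB blocks, 4-byte tags, 32-byte digest */
	printf("%lu\n", recalc_tags_bytes(2048, 3, 4, 32));	/* prints 1052 */
	return 0;
}
#endif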
|
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
static void bitmap_block_work(struct work_struct *w)
|
|
|
|
{
|
|
|
|
struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
|
|
|
|
struct dm_integrity_c *ic = bbs->ic;
|
|
|
|
struct bio *bio;
|
|
|
|
struct bio_list bio_queue;
|
|
|
|
struct bio_list waiting;
|
|
|
|
|
|
|
|
bio_list_init(&waiting);
|
|
|
|
|
|
|
|
spin_lock(&bbs->bio_queue_lock);
|
|
|
|
bio_queue = bbs->bio_queue;
|
|
|
|
bio_list_init(&bbs->bio_queue);
|
|
|
|
spin_unlock(&bbs->bio_queue_lock);
|
|
|
|
|
|
|
|
while ((bio = bio_list_pop(&bio_queue))) {
|
|
|
|
struct dm_integrity_io *dio;
|
|
|
|
|
|
|
|
dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
|
|
|
|
|
2019-05-09 19:25:49 +00:00
|
|
|
if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
|
|
|
|
dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
|
2019-04-29 12:57:24 +00:00
|
|
|
remove_range(ic, &dio->range);
|
|
|
|
INIT_WORK(&dio->work, integrity_bio_wait);
|
2020-02-17 12:43:03 +00:00
|
|
|
queue_work(ic->offload_wq, &dio->work);
|
2019-04-29 12:57:24 +00:00
|
|
|
} else {
|
2019-05-09 19:25:49 +00:00
|
|
|
block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
|
|
|
|
dio->range.n_sectors, BITMAP_OP_SET);
|
2019-04-29 12:57:24 +00:00
|
|
|
bio_list_add(&waiting, bio);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bio_list_empty(&waiting))
|
|
|
|
return;
|
|
|
|
|
2022-07-14 18:06:52 +00:00
|
|
|
rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
|
2019-05-09 19:25:49 +00:00
|
|
|
bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
|
|
|
|
BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
|
2019-04-29 12:57:24 +00:00
|
|
|
|
|
|
|
while ((bio = bio_list_pop(&waiting))) {
|
|
|
|
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
|
|
|
|
|
2019-05-09 19:25:49 +00:00
|
|
|
block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
|
|
|
|
dio->range.n_sectors, BITMAP_OP_SET);
|
2019-04-29 12:57:24 +00:00
|
|
|
|
|
|
|
remove_range(ic, &dio->range);
|
|
|
|
INIT_WORK(&dio->work, integrity_bio_wait);
|
2020-02-17 12:43:03 +00:00
|
|
|
queue_work(ic->offload_wq, &dio->work);
|
2019-04-29 12:57:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
|
|
|
|
}
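/*
 * In bitmap mode a write may proceed immediately when every bit covering
 * its range is already set in may_write_bitmap.  Otherwise the bits are
 * first set in the in-core journal bitmap, the affected on-disk bitmap
 * block is written with FUA, and only then are the bits set in
 * may_write_bitmap and the waiting bios released; this ordering ensures
 * the on-disk bitmap already covers a region before any write to it is
 * allowed to start.
 */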
|
|
|
|
|
|
|
|
static void bitmap_flush_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
|
|
|
|
struct dm_integrity_range range;
|
|
|
|
unsigned long limit;
|
2019-04-29 12:57:26 +00:00
|
|
|
struct bio *bio;
|
2019-04-29 12:57:24 +00:00
|
|
|
|
2021-01-08 16:15:56 +00:00
|
|
|
dm_integrity_flush_buffers(ic, false);
|
2019-04-29 12:57:24 +00:00
|
|
|
|
|
|
|
range.logical_sector = 0;
|
|
|
|
range.n_sectors = ic->provided_data_sectors;
|
|
|
|
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
add_new_range_and_wait(ic, &range);
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
|
2021-01-08 16:15:56 +00:00
|
|
|
dm_integrity_flush_buffers(ic, true);
|
2019-04-29 12:57:24 +00:00
|
|
|
|
|
|
|
limit = ic->provided_data_sectors;
|
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
|
|
|
|
limit = le64_to_cpu(ic->sb->recalc_sector)
|
|
|
|
>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
|
|
|
|
<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
|
|
|
|
}
|
2019-04-29 12:57:26 +00:00
|
|
|
/*DEBUG_print("zeroing journal\n");*/
|
2019-04-29 12:57:24 +00:00
|
|
|
block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
|
|
|
|
block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
|
|
|
|
|
2022-07-14 18:06:52 +00:00
|
|
|
rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
|
2019-05-09 19:25:49 +00:00
|
|
|
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
|
2019-04-29 12:57:24 +00:00
|
|
|
|
2019-04-29 12:57:26 +00:00
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
remove_range_unlocked(ic, &range);
|
|
|
|
while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
|
|
|
|
bio_endio(bio);
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
|
|
|
spin_lock_irq(&ic->endio_wait.lock);
|
|
|
|
}
|
|
|
|
spin_unlock_irq(&ic->endio_wait.lock);
|
2019-04-29 12:57:24 +00:00
|
|
|
}
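/*
 * bitmap_flush_work() is the point where dirty bits are retired: it waits
 * for in-flight I/O by claiming the whole device range, flushes data and
 * metadata, clears the journal and may_write bitmaps up to "limit"
 * (everything, or only the already-recalculated prefix while
 * SB_FLAG_RECALCULATING is set), writes the bitmap region back with FUA,
 * and finally completes any bios that were queued for synchronous mode.
 */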
|
|
|
|
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
|
|
|
|
unsigned int n_sections, unsigned char commit_seq)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i, j, n;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (!n_sections)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (n = 0; n < n_sections; n++) {
|
|
|
|
i = start_section + n;
|
|
|
|
wraparound_section(ic, &i);
|
|
|
|
for (j = 0; j < ic->journal_section_sectors; j++) {
|
|
|
|
struct journal_sector *js = access_journal(ic, i, j);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2021-12-13 22:33:25 +00:00
|
|
|
BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
|
|
|
|
memset(&js->sectors, 0, sizeof(js->sectors));
|
2017-01-04 19:23:53 +00:00
|
|
|
js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
|
|
|
|
}
|
|
|
|
for (j = 0; j < ic->journal_section_entries; j++) {
|
|
|
|
struct journal_entry *je = access_journal_entry(ic, i, j);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
journal_entry_set_unused(je);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
write_journal(ic, start_section, n_sections);
|
|
|
|
}
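/*
 * init_journal() erases a range of sections: every journal sector gets its
 * data zeroed and its commit_id stamped for the given commit sequence,
 * every entry is marked unused, and the sections are then written out via
 * write_journal().
 */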
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
|
|
|
unsigned char k;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
for (k = 0; k < N_COMMIT_IDS; k++) {
|
|
|
|
if (dm_integrity_commit_id(ic, i, j, k) == id)
|
|
|
|
return k;
|
|
|
|
}
|
|
|
|
dm_integrity_io_error(ic, "journal commit id", -EIO);
|
|
|
|
return -EIO;
|
|
|
|
}
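/*
 * Commit sequence numbers form a small cycle of N_COMMIT_IDS values (the
 * debug message in replay_journal() below prints four of them), and
 * prev_commit_seq()/next_commit_seq() step through that cycle.  A minimal
 * userspace sketch (compiled out, not driver code) of that modular
 * stepping, assuming a cycle length of 4:
 */
#if 0
#include <stdio.h>

#define N_COMMIT_IDS 4

static unsigned char prev_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

int main(void)
{
	printf("%u %u\n", prev_seq(0), next_seq(3));	/* prints "3 0" */
	return 0;
}
#endif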
|
|
|
|
|
|
|
|
static void replay_journal(struct dm_integrity_c *ic)
|
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i, j;
|
2017-01-04 19:23:53 +00:00
|
|
|
bool used_commit_ids[N_COMMIT_IDS];
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int max_commit_id_sections[N_COMMIT_IDS];
|
|
|
|
unsigned int write_start, write_sections;
|
|
|
|
unsigned int continue_section;
|
2017-01-04 19:23:53 +00:00
|
|
|
bool journal_empty;
|
|
|
|
unsigned char unused, last_used, want_commit_seq;
|
|
|
|
|
2017-03-17 16:40:51 +00:00
|
|
|
if (ic->mode == 'R')
|
|
|
|
return;
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
if (ic->journal_uptodate)
|
|
|
|
return;
|
|
|
|
|
|
|
|
last_used = 0;
|
|
|
|
write_start = 0;
|
|
|
|
|
|
|
|
if (!ic->just_formatted) {
|
|
|
|
DEBUG_print("reading journal\n");
|
2022-07-14 18:06:52 +00:00
|
|
|
rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
|
2017-01-04 19:23:53 +00:00
|
|
|
if (ic->journal_io)
|
|
|
|
DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
|
|
|
|
if (ic->journal_io) {
|
|
|
|
struct journal_completion crypt_comp;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
crypt_comp.ic = ic;
|
2017-08-15 15:11:59 +00:00
|
|
|
init_completion(&crypt_comp.comp);
|
2017-01-04 19:23:53 +00:00
|
|
|
crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
|
|
|
|
encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
|
|
|
|
wait_for_completion(&crypt_comp.comp);
|
|
|
|
}
|
|
|
|
DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (dm_integrity_failed(ic))
|
|
|
|
goto clear_journal;
|
|
|
|
|
|
|
|
journal_empty = true;
|
2023-02-07 21:16:53 +00:00
|
|
|
memset(used_commit_ids, 0, sizeof(used_commit_ids));
|
|
|
|
memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
|
2017-01-04 19:23:53 +00:00
|
|
|
for (i = 0; i < ic->journal_sections; i++) {
|
|
|
|
for (j = 0; j < ic->journal_section_sectors; j++) {
|
|
|
|
int k;
|
|
|
|
struct journal_sector *js = access_journal(ic, i, j);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
k = find_commit_seq(ic, i, j, js->commit_id);
|
|
|
|
if (k < 0)
|
|
|
|
goto clear_journal;
|
|
|
|
used_commit_ids[k] = true;
|
|
|
|
max_commit_id_sections[k] = i;
|
|
|
|
}
|
|
|
|
if (journal_empty) {
|
|
|
|
for (j = 0; j < ic->journal_section_entries; j++) {
|
|
|
|
struct journal_entry *je = access_journal_entry(ic, i, j);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
if (!journal_entry_is_unused(je)) {
|
|
|
|
journal_empty = false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!used_commit_ids[N_COMMIT_IDS - 1]) {
|
|
|
|
unused = N_COMMIT_IDS - 1;
|
|
|
|
while (unused && !used_commit_ids[unused - 1])
|
|
|
|
unused--;
|
|
|
|
} else {
|
|
|
|
for (unused = 0; unused < N_COMMIT_IDS; unused++)
|
|
|
|
if (!used_commit_ids[unused])
|
|
|
|
break;
|
|
|
|
if (unused == N_COMMIT_IDS) {
|
|
|
|
dm_integrity_io_error(ic, "journal commit ids", -EIO);
|
|
|
|
goto clear_journal;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
|
|
|
|
unused, used_commit_ids[0], used_commit_ids[1],
|
|
|
|
used_commit_ids[2], used_commit_ids[3]);
|
|
|
|
|
|
|
|
last_used = prev_commit_seq(unused);
|
|
|
|
want_commit_seq = prev_commit_seq(last_used);
|
|
|
|
|
|
|
|
if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
|
|
|
|
journal_empty = true;
|
|
|
|
|
|
|
|
write_start = max_commit_id_sections[last_used] + 1;
|
|
|
|
if (unlikely(write_start >= ic->journal_sections))
|
|
|
|
want_commit_seq = next_commit_seq(want_commit_seq);
|
|
|
|
wraparound_section(ic, &write_start);
|
|
|
|
|
|
|
|
i = write_start;
|
|
|
|
for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
|
|
|
|
for (j = 0; j < ic->journal_section_sectors; j++) {
|
|
|
|
struct journal_sector *js = access_journal(ic, i, j);
|
|
|
|
|
|
|
|
if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
|
|
|
|
/*
|
|
|
|
* This could be caused by crash during writing.
|
|
|
|
* We won't replay the inconsistent part of the
|
|
|
|
* journal.
|
|
|
|
*/
|
|
|
|
DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
|
|
|
|
i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
|
|
|
|
goto brk;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
i++;
|
|
|
|
if (unlikely(i >= ic->journal_sections))
|
|
|
|
want_commit_seq = next_commit_seq(want_commit_seq);
|
|
|
|
wraparound_section(ic, &i);
|
|
|
|
}
|
|
|
|
brk:
|
|
|
|
|
|
|
|
if (!journal_empty) {
|
|
|
|
DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
|
|
|
|
write_sections, write_start, want_commit_seq);
|
|
|
|
do_journal_write(ic, write_start, write_sections, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
|
|
|
|
continue_section = write_start;
|
|
|
|
ic->commit_seq = want_commit_seq;
|
|
|
|
DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
|
|
|
|
} else {
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int s;
|
2017-01-04 19:23:53 +00:00
|
|
|
unsigned char erase_seq;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
clear_journal:
|
|
|
|
DEBUG_print("clearing journal\n");
|
|
|
|
|
|
|
|
erase_seq = prev_commit_seq(prev_commit_seq(last_used));
|
|
|
|
s = write_start;
|
|
|
|
init_journal(ic, s, 1, erase_seq);
|
|
|
|
s++;
|
|
|
|
wraparound_section(ic, &s);
|
|
|
|
if (ic->journal_sections >= 2) {
|
|
|
|
init_journal(ic, s, ic->journal_sections - 2, erase_seq);
|
|
|
|
s += ic->journal_sections - 2;
|
|
|
|
wraparound_section(ic, &s);
|
|
|
|
init_journal(ic, s, 1, erase_seq);
|
|
|
|
}
|
|
|
|
|
|
|
|
continue_section = 0;
|
|
|
|
ic->commit_seq = next_commit_seq(erase_seq);
|
|
|
|
}
|
|
|
|
|
|
|
|
ic->committed_section = continue_section;
|
|
|
|
ic->n_committed_sections = 0;
|
|
|
|
|
|
|
|
ic->uncommitted_section = continue_section;
|
|
|
|
ic->n_uncommitted_sections = 0;
|
|
|
|
|
|
|
|
ic->free_section = continue_section;
|
|
|
|
ic->free_section_entry = 0;
|
|
|
|
ic->free_sectors = ic->journal_entries;
|
|
|
|
|
|
|
|
ic->journal_tree_root = RB_ROOT;
|
|
|
|
for (i = 0; i < ic->journal_entries; i++)
|
|
|
|
init_journal_node(&ic->journal_tree[i]);
|
|
|
|
}
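/*
 * Replay in short: the journal is read (and decrypted if needed), every
 * sector's commit_id is classified by commit sequence, and the first unused
 * sequence tells which commit was the last one completed.  The consistent
 * tail starting at write_start is replayed with do_journal_write(..., true);
 * if the commit ids cannot be trusted, the journal is erased instead and
 * the device continues with a fresh one.
 */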
|
|
|
|
|
2019-04-29 12:57:26 +00:00
|
|
|
static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
|
2019-04-29 12:57:25 +00:00
|
|
|
{
|
2023-02-06 22:42:32 +00:00
|
|
|
DEBUG_print("%s\n", __func__);
|
2019-04-29 12:57:25 +00:00
|
|
|
|
|
|
|
if (ic->mode == 'B') {
|
2019-04-29 12:57:26 +00:00
|
|
|
ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
|
|
|
|
ic->synchronous_mode = 1;
|
|
|
|
|
2019-04-29 12:57:25 +00:00
|
|
|
cancel_delayed_work_sync(&ic->bitmap_flush_work);
|
|
|
|
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
|
|
|
|
flush_workqueue(ic->commit_wq);
|
|
|
|
}
|
2019-04-29 12:57:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
|
|
|
|
|
2023-02-06 22:42:32 +00:00
|
|
|
DEBUG_print("%s\n", __func__);
|
2019-04-29 12:57:26 +00:00
|
|
|
|
|
|
|
dm_integrity_enter_synchronous_mode(ic);
|
2019-04-29 12:57:25 +00:00
|
|
|
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
}
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
static void dm_integrity_postsuspend(struct dm_target *ti)
|
|
|
|
{
|
2023-03-17 01:35:54 +00:00
|
|
|
struct dm_integrity_c *ic = ti->private;
|
2019-04-29 12:57:24 +00:00
|
|
|
int r;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2019-04-29 12:57:25 +00:00
|
|
|
WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
del_timer_sync(&ic->autocommit_timer);
|
|
|
|
|
2018-07-03 18:13:33 +00:00
|
|
|
if (ic->recalc_wq)
|
|
|
|
drain_workqueue(ic->recalc_wq);
|
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->mode == 'B')
|
|
|
|
cancel_delayed_work_sync(&ic->bitmap_flush_work);
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
queue_work(ic->commit_wq, &ic->commit_work);
|
|
|
|
drain_workqueue(ic->commit_wq);
|
|
|
|
|
|
|
|
if (ic->mode == 'J') {
|
2022-11-15 17:48:26 +00:00
|
|
|
queue_work(ic->writer_wq, &ic->writer_work);
|
2017-01-04 19:23:53 +00:00
|
|
|
drain_workqueue(ic->writer_wq);
|
2021-01-08 16:15:56 +00:00
|
|
|
dm_integrity_flush_buffers(ic, true);
|
2022-11-15 17:51:50 +00:00
|
|
|
if (ic->wrote_to_journal) {
|
|
|
|
init_journal(ic, ic->free_section,
|
|
|
|
ic->journal_sections - ic->free_section, ic->commit_seq);
|
|
|
|
if (ic->free_section) {
|
|
|
|
init_journal(ic, 0, ic->free_section,
|
|
|
|
next_commit_seq(ic->commit_seq));
|
|
|
|
}
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->mode == 'B') {
|
2021-01-08 16:15:56 +00:00
|
|
|
dm_integrity_flush_buffers(ic, true);
|
2019-04-29 12:57:24 +00:00
|
|
|
#if 1
|
2019-05-09 19:25:49 +00:00
|
|
|
/* set to 0 to test bitmap replay code */
|
2019-04-29 12:57:24 +00:00
|
|
|
init_journal(ic, 0, ic->journal_sections, 0);
|
|
|
|
ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
|
2022-07-14 18:06:52 +00:00
|
|
|
r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
|
2019-04-29 12:57:24 +00:00
|
|
|
if (unlikely(r))
|
|
|
|
dm_integrity_io_error(ic, "writing superblock", r);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
|
|
|
|
|
|
|
|
ic->journal_uptodate = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dm_integrity_resume(struct dm_target *ti)
|
|
|
|
{
|
2023-03-17 01:35:54 +00:00
|
|
|
struct dm_integrity_c *ic = ti->private;
|
2020-03-22 19:42:25 +00:00
|
|
|
__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
|
2019-04-29 12:57:24 +00:00
|
|
|
int r;
|
2020-03-22 19:42:25 +00:00
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
DEBUG_print("resume\n");
|
|
|
|
|
2022-11-15 17:51:50 +00:00
|
|
|
ic->wrote_to_journal = false;
|
|
|
|
|
2020-03-22 19:42:25 +00:00
|
|
|
if (ic->provided_data_sectors != old_provided_data_sectors) {
|
|
|
|
if (ic->provided_data_sectors > old_provided_data_sectors &&
|
|
|
|
ic->mode == 'B' &&
|
|
|
|
ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
|
2022-07-14 18:06:52 +00:00
|
|
|
rw_journal_sectors(ic, REQ_OP_READ, 0,
|
2020-03-22 19:42:25 +00:00
|
|
|
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
|
|
|
|
block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
|
|
|
|
ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
|
2022-07-14 18:06:52 +00:00
|
|
|
rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
|
2020-03-22 19:42:25 +00:00
|
|
|
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
|
2022-07-14 18:06:52 +00:00
|
|
|
r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
|
2020-03-22 19:42:25 +00:00
|
|
|
if (unlikely(r))
|
|
|
|
dm_integrity_io_error(ic, "writing superblock", r);
|
|
|
|
}
|
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
|
|
|
|
DEBUG_print("resume dirty_bitmap\n");
|
2022-07-14 18:06:52 +00:00
|
|
|
rw_journal_sectors(ic, REQ_OP_READ, 0,
|
2019-05-09 19:25:49 +00:00
|
|
|
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->mode == 'B') {
|
2021-03-23 14:59:45 +00:00
|
|
|
if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
|
|
|
|
!ic->reset_recalculate_flag) {
|
2019-04-29 12:57:24 +00:00
|
|
|
block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
|
|
|
|
block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
|
2019-05-09 19:25:49 +00:00
|
|
|
if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
|
|
|
|
BITMAP_OP_TEST_ALL_CLEAR)) {
|
2019-04-29 12:57:24 +00:00
|
|
|
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
|
|
|
|
ic->sb->recalc_sector = cpu_to_le64(0);
|
|
|
|
}
|
|
|
|
} else {
|
2019-05-09 19:25:49 +00:00
|
|
|
DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
|
|
|
|
ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
|
2019-04-29 12:57:24 +00:00
|
|
|
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
|
|
|
|
block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
|
|
|
|
block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
|
|
|
|
block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
|
2022-07-14 18:06:52 +00:00
|
|
|
rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
|
2019-05-09 19:25:49 +00:00
|
|
|
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
|
2019-04-29 12:57:24 +00:00
|
|
|
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
|
|
|
|
ic->sb->recalc_sector = cpu_to_le64(0);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
|
2021-03-23 14:59:45 +00:00
|
|
|
block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
|
|
|
|
ic->reset_recalculate_flag) {
|
2019-04-29 12:57:24 +00:00
|
|
|
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
|
|
|
|
ic->sb->recalc_sector = cpu_to_le64(0);
|
|
|
|
}
|
|
|
|
init_journal(ic, 0, ic->journal_sections, 0);
|
|
|
|
replay_journal(ic);
|
|
|
|
ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
|
|
|
|
}
|
2022-07-14 18:06:52 +00:00
|
|
|
r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
|
2019-04-29 12:57:24 +00:00
|
|
|
if (unlikely(r))
|
|
|
|
dm_integrity_io_error(ic, "writing superblock", r);
|
|
|
|
} else {
|
|
|
|
replay_journal(ic);
|
2021-03-23 14:59:45 +00:00
|
|
|
if (ic->reset_recalculate_flag) {
|
|
|
|
ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
|
|
|
|
ic->sb->recalc_sector = cpu_to_le64(0);
|
|
|
|
}
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->mode == 'B') {
|
|
|
|
ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
|
|
|
|
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
|
2022-07-14 18:06:52 +00:00
|
|
|
r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
|
2019-04-29 12:57:24 +00:00
|
|
|
if (unlikely(r))
|
|
|
|
dm_integrity_io_error(ic, "writing superblock", r);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2020-02-07 16:42:30 +00:00
|
|
|
block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
|
|
|
|
block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
|
|
|
|
block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
|
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
|
|
|
|
le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
|
|
|
|
block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
|
|
|
|
ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
|
|
|
|
block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
|
|
|
|
ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
|
|
|
|
block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
|
|
|
|
ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
|
|
|
|
}
|
2022-07-14 18:06:52 +00:00
|
|
|
rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
|
2019-05-09 19:25:49 +00:00
|
|
|
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
|
2019-04-29 12:57:24 +00:00
|
|
|
}
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
DEBUG_print("testing recalc: %x\n", ic->sb->flags);
|
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
|
2018-07-03 18:13:33 +00:00
|
|
|
__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-03-22 19:42:22 +00:00
|
|
|
DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
|
2018-07-03 18:13:33 +00:00
|
|
|
if (recalc_pos < ic->provided_data_sectors) {
|
|
|
|
queue_work(ic->recalc_wq, &ic->recalc_work);
|
|
|
|
} else if (recalc_pos > ic->provided_data_sectors) {
|
|
|
|
ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
|
|
|
|
recalc_write_super(ic);
|
|
|
|
}
|
|
|
|
}
|
2019-04-29 12:57:25 +00:00
|
|
|
|
|
|
|
ic->reboot_notifier.notifier_call = dm_integrity_reboot;
|
|
|
|
ic->reboot_notifier.next = NULL;
|
|
|
|
ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
|
|
|
|
WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
|
2019-04-29 12:57:26 +00:00
|
|
|
|
|
|
|
#if 0
|
2019-05-09 19:25:49 +00:00
|
|
|
/* set to 1 to stress test synchronous mode */
|
2019-04-29 12:57:26 +00:00
|
|
|
dm_integrity_enter_synchronous_mode(ic);
|
|
|
|
#endif
|
2017-01-04 19:23:53 +00:00
|
|
|
}
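/*
 * Resume handling in short: if the superblock says the bitmap was dirty
 * (SB_FLAG_DIRTY_BITMAP), the on-disk bitmap is loaded and either reused
 * (mode 'B' with matching bit granularity) or turned into a recalculation
 * request; otherwise the journal is replayed.  When switching into bitmap
 * mode, the bits covering the not-yet-recalculated tail are set in the
 * journal, recalc and may_write bitmaps.  Finally, if SB_FLAG_RECALCULATING
 * is still set, the recalculation worker is queued to continue from
 * sb->recalc_sector.
 */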
|
|
|
|
|
|
|
|
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int status_flags, char *result, unsigned int maxlen)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
2023-03-17 01:35:54 +00:00
|
|
|
struct dm_integrity_c *ic = ti->private;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int arg_count;
|
2017-01-04 19:23:53 +00:00
|
|
|
size_t sz = 0;
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case STATUSTYPE_INFO:
|
2018-07-03 18:13:28 +00:00
|
|
|
DMEMIT("%llu %llu",
|
2020-04-03 01:11:24 +00:00
|
|
|
(unsigned long long)atomic64_read(&ic->number_of_mismatches),
|
2020-03-22 19:42:22 +00:00
|
|
|
ic->provided_data_sectors);
|
2018-07-03 18:13:33 +00:00
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
|
2020-03-22 19:42:22 +00:00
|
|
|
DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
|
2018-07-03 18:13:33 +00:00
|
|
|
else
|
|
|
|
DMEMIT(" -");
|
2017-01-04 19:23:53 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case STATUSTYPE_TABLE: {
|
|
|
|
__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
watermark_percentage += ic->journal_entries / 2;
|
|
|
|
do_div(watermark_percentage, ic->journal_entries);
|
2019-04-29 12:57:18 +00:00
|
|
|
arg_count = 3;
|
2018-07-03 18:13:30 +00:00
|
|
|
arg_count += !!ic->meta_dev;
|
2017-04-18 20:51:52 +00:00
|
|
|
arg_count += ic->sectors_per_block != 1;
|
2018-07-03 18:13:33 +00:00
|
|
|
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
|
2021-03-23 14:59:45 +00:00
|
|
|
arg_count += ic->reset_recalculate_flag;
|
2020-03-22 19:42:26 +00:00
|
|
|
arg_count += ic->discard;
|
2019-04-29 12:57:18 +00:00
|
|
|
arg_count += ic->mode == 'J';
|
|
|
|
arg_count += ic->mode == 'J';
|
2019-04-29 12:57:24 +00:00
|
|
|
arg_count += ic->mode == 'B';
|
|
|
|
arg_count += ic->mode == 'B';
|
2017-01-04 19:23:53 +00:00
|
|
|
arg_count += !!ic->internal_hash_alg.alg_string;
|
|
|
|
arg_count += !!ic->journal_crypt_alg.alg_string;
|
|
|
|
arg_count += !!ic->journal_mac_alg.alg_string;
|
2019-11-13 11:48:16 +00:00
|
|
|
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
|
2021-01-21 15:09:32 +00:00
|
|
|
arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
|
2021-01-20 18:59:11 +00:00
|
|
|
arg_count += ic->legacy_recalculate;
|
2020-03-22 19:42:22 +00:00
|
|
|
DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
|
2017-01-04 19:23:53 +00:00
|
|
|
ic->tag_size, ic->mode, arg_count);
|
2018-07-03 18:13:30 +00:00
|
|
|
if (ic->meta_dev)
|
|
|
|
DMEMIT(" meta_device:%s", ic->meta_dev->name);
|
2018-07-03 18:13:33 +00:00
|
|
|
if (ic->sectors_per_block != 1)
|
|
|
|
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
|
2020-02-17 13:11:35 +00:00
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
|
2018-07-03 18:13:33 +00:00
|
|
|
DMEMIT(" recalculate");
|
2021-03-23 14:59:45 +00:00
|
|
|
if (ic->reset_recalculate_flag)
|
|
|
|
DMEMIT(" reset_recalculate");
|
2020-03-22 19:42:26 +00:00
|
|
|
if (ic->discard)
|
|
|
|
DMEMIT(" allow_discards");
|
2017-04-18 20:51:50 +00:00
|
|
|
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
|
|
|
|
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
|
|
|
|
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
|
2019-04-29 12:57:18 +00:00
|
|
|
if (ic->mode == 'J') {
|
2023-01-25 20:14:58 +00:00
|
|
|
DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
|
2019-04-29 12:57:18 +00:00
|
|
|
DMEMIT(" commit_time:%u", ic->autocommit_msec);
|
|
|
|
}
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->mode == 'B') {
|
2020-03-22 19:42:22 +00:00
|
|
|
DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
|
2019-04-29 12:57:24 +00:00
|
|
|
DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
|
|
|
|
}
|
2019-11-13 11:48:16 +00:00
|
|
|
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
|
|
|
|
DMEMIT(" fix_padding");
|
2021-01-21 15:09:32 +00:00
|
|
|
if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
|
|
|
|
DMEMIT(" fix_hmac");
|
2021-01-20 18:59:11 +00:00
|
|
|
if (ic->legacy_recalculate)
|
|
|
|
DMEMIT(" legacy_recalculate");
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
#define EMIT_ALG(a, n) \
|
|
|
|
do { \
|
|
|
|
if (ic->a.alg_string) { \
|
|
|
|
DMEMIT(" %s:%s", n, ic->a.alg_string); \
|
|
|
|
if (ic->a.key_string) \
|
|
|
|
DMEMIT(":%s", ic->a.key_string);\
|
|
|
|
} \
|
|
|
|
} while (0)
|
2017-04-18 20:51:50 +00:00
|
|
|
EMIT_ALG(internal_hash_alg, "internal_hash");
|
|
|
|
EMIT_ALG(journal_crypt_alg, "journal_crypt");
|
|
|
|
EMIT_ALG(journal_mac_alg, "journal_mac");
|
2017-01-04 19:23:53 +00:00
|
|
|
break;
|
|
|
|
}
|
2021-07-13 00:49:03 +00:00
|
|
|
case STATUSTYPE_IMA:
|
|
|
|
DMEMIT_TARGET_NAME_VERSION(ti->type);
|
|
|
|
DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c",
|
|
|
|
ic->dev->name, ic->start, ic->tag_size, ic->mode);
|
|
|
|
|
|
|
|
if (ic->meta_dev)
|
|
|
|
DMEMIT(",meta_device=%s", ic->meta_dev->name);
|
|
|
|
if (ic->sectors_per_block != 1)
|
|
|
|
DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);
|
|
|
|
|
|
|
|
DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
|
|
|
|
'y' : 'n');
|
|
|
|
DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
|
|
|
|
DMEMIT(",fix_padding=%c",
|
|
|
|
((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
|
|
|
|
DMEMIT(",fix_hmac=%c",
|
|
|
|
((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
|
|
|
|
DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');
|
|
|
|
|
|
|
|
DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
|
|
|
|
DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
|
|
|
|
DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
|
|
|
|
DMEMIT(";");
|
|
|
|
break;
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
}
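/*
 * For orientation, an illustrative example (constructed from the format
 * strings above, not captured from a real device): with mode 'J', crc32c as
 * internal_hash and otherwise default options, the STATUSTYPE_TABLE branch
 * would emit a single line shaped like
 *
 *   8:16 0 4 J 6 journal_sectors:8192 interleave_sectors:32768
 *       buffer_sectors:128 journal_watermark:50 commit_time:10000
 *       internal_hash:crc32c
 *
 * (wrapped here for readability), where "6" is arg_count: journal_sectors,
 * interleave_sectors, buffer_sectors, the two mode-'J' arguments and
 * internal_hash.  The device name and journal size are example values.
 */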
|
|
|
|
|
|
|
|
static int dm_integrity_iterate_devices(struct dm_target *ti,
|
|
|
|
iterate_devices_callout_fn fn, void *data)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = ti->private;
|
|
|
|
|
2018-07-03 18:13:30 +00:00
|
|
|
if (!ic->meta_dev)
|
|
|
|
return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
|
|
|
|
else
|
|
|
|
return fn(ti, ic->dev, 0, ti->len, data);
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
2017-04-18 20:51:52 +00:00
|
|
|
static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
|
|
|
|
{
|
|
|
|
struct dm_integrity_c *ic = ti->private;
|
|
|
|
|
|
|
|
if (ic->sectors_per_block > 1) {
|
|
|
|
limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
|
|
|
|
limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
|
|
|
|
blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
|
2022-11-10 18:45:00 +00:00
|
|
|
limits->dma_alignment = limits->logical_block_size - 1;
|
2017-04-18 20:51:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
static void calculate_journal_section_size(struct dm_integrity_c *ic)
|
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int sector_space = JOURNAL_SECTOR_DATA;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
|
2017-04-18 20:51:52 +00:00
|
|
|
ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
|
2017-01-04 19:23:53 +00:00
|
|
|
JOURNAL_ENTRY_ROUNDUP);
|
|
|
|
|
|
|
|
if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
|
|
|
|
sector_space -= JOURNAL_MAC_PER_SECTOR;
|
|
|
|
ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
|
|
|
|
ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
|
2017-04-18 20:51:52 +00:00
|
|
|
ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
|
2017-01-04 19:23:53 +00:00
|
|
|
ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
|
|
|
|
}
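/*
 * The geometry above: each 512-byte journal sector holds
 * sector_space / journal_entry_size entries, and a section consists of
 * JOURNAL_BLOCK_SECTORS metadata sectors plus one data block per entry.
 * Below is a minimal userspace sketch (compiled out, not driver code) of
 * the same arithmetic; the constants fed to it in main() are example
 * assumptions, not the driver's actual values.
 */
#if 0
#include <stdio.h>

struct geometry {
	unsigned entries_per_sector;
	unsigned section_entries;
	unsigned section_sectors;
};

static struct geometry journal_geometry(unsigned sector_space,
					unsigned entry_size,
					unsigned journal_block_sectors,
					unsigned log2_sectors_per_block)
{
	struct geometry g;

	g.entries_per_sector = sector_space / entry_size;
	g.section_entries = g.entries_per_sector * journal_block_sectors;
	/* one data block per entry plus the metadata sectors of the section */
	g.section_sectors = (g.section_entries << log2_sectors_per_block) +
			    journal_block_sectors;
	return g;
}

int main(void)
{
	/* e.g. 504 usable bytes per sector, 32-byte entries, 512-byte blocks */
	struct geometry g = journal_geometry(504, 32, 8, 0);

	/* prints "15 entries/sector, 120 entries/section, 128 sectors/section" */
	printf("%u entries/sector, %u entries/section, %u sectors/section\n",
	       g.entries_per_sector, g.section_entries, g.section_sectors);
	return 0;
}
#endif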
|
|
|
|
|
|
|
|
static int calculate_device_limits(struct dm_integrity_c *ic)
|
|
|
|
{
|
|
|
|
__u64 initial_sectors;
|
|
|
|
|
|
|
|
calculate_journal_section_size(ic);
|
|
|
|
initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
|
2018-07-03 18:13:30 +00:00
|
|
|
if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
|
2017-01-04 19:23:53 +00:00
|
|
|
return -EINVAL;
|
|
|
|
ic->initial_sectors = initial_sectors;
|
|
|
|
|
2018-07-03 18:13:30 +00:00
|
|
|
if (!ic->meta_dev) {
|
|
|
|
sector_t last_sector, last_area, last_offset;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2019-11-13 11:48:16 +00:00
|
|
|
/* we have to maintain excessive padding for compatibility with existing volumes */
|
|
|
|
__u64 metadata_run_padding =
|
|
|
|
ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
|
|
|
|
(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
|
|
|
|
(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
|
|
|
|
|
|
|
|
ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
|
|
|
|
metadata_run_padding) >> SECTOR_SHIFT;
|
2018-07-03 18:13:30 +00:00
|
|
|
if (!(ic->metadata_run & (ic->metadata_run - 1)))
|
|
|
|
ic->log2_metadata_run = __ffs(ic->metadata_run);
|
|
|
|
else
|
|
|
|
ic->log2_metadata_run = -1;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2018-07-03 18:13:30 +00:00
|
|
|
get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
|
|
|
|
last_sector = get_data_sector(ic, last_area, last_offset);
|
|
|
|
if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
|
|
|
|
return -EINVAL;
|
|
|
|
} else {
|
2019-05-07 18:28:35 +00:00
|
|
|
__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2018-07-03 18:13:30 +00:00
|
|
|
meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
|
|
|
|
>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
|
|
|
|
meta_size <<= ic->log2_buffer_sectors;
|
|
|
|
if (ic->initial_sectors + meta_size < ic->initial_sectors ||
|
|
|
|
ic->initial_sectors + meta_size > ic->meta_device_sectors)
|
|
|
|
return -EINVAL;
|
|
|
|
ic->metadata_run = 1;
|
|
|
|
ic->log2_metadata_run = 0;
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-03-22 19:42:24 +00:00
|
|
|
static void get_provided_data_sectors(struct dm_integrity_c *ic)
|
|
|
|
{
|
|
|
|
if (!ic->meta_dev) {
|
|
|
|
int test_bit;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2020-03-22 19:42:24 +00:00
|
|
|
ic->provided_data_sectors = 0;
|
|
|
|
for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
|
|
|
|
__u64 prev_data_sectors = ic->provided_data_sectors;
|
|
|
|
|
|
|
|
ic->provided_data_sectors |= (sector_t)1 << test_bit;
|
|
|
|
if (calculate_device_limits(ic))
|
|
|
|
ic->provided_data_sectors = prev_data_sectors;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ic->provided_data_sectors = ic->data_device_sectors;
|
|
|
|
ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
|
|
|
|
}
|
|
|
|
}
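/*
 * Without a separate metadata device, provided_data_sectors is found by
 * building the largest value that still passes calculate_device_limits():
 * bits are tried from the most significant downwards and kept only if the
 * resulting size still fits.  Below is a minimal userspace sketch (compiled
 * out, not driver code) of that greedy bit search against a made-up
 * monotone "fits" predicate; the driver additionally stops at bit 3.
 */
#if 0
#include <stdio.h>

static int fits(unsigned long long v)
{
	return v <= 123456789ULL;	/* stand-in for calculate_device_limits() */
}

static unsigned long long largest_fitting(int top_bit)
{
	unsigned long long v = 0;
	int bit;

	for (bit = top_bit; bit >= 0; bit--) {
		unsigned long long prev = v;

		v |= 1ULL << bit;
		if (!fits(v))
			v = prev;	/* keep the bit only if it still fits */
	}
	return v;
}

int main(void)
{
	printf("%llu\n", largest_fitting(40));	/* prints 123456789 */
	return 0;
}
#endif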
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
static int initialize_superblock(struct dm_integrity_c *ic,
|
|
|
|
unsigned int journal_sectors, unsigned int interleave_sectors)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int journal_sections;
|
2017-01-04 19:23:53 +00:00
|
|
|
int test_bit;
|
|
|
|
|
2017-04-18 20:51:50 +00:00
|
|
|
memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
|
2017-01-04 19:23:53 +00:00
|
|
|
memcpy(ic->sb->magic, SB_MAGIC, 8);
|
|
|
|
ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
|
2017-04-18 20:51:52 +00:00
|
|
|
ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
|
2017-01-04 19:23:53 +00:00
|
|
|
if (ic->journal_mac_alg.alg_string)
|
|
|
|
ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
|
|
|
|
|
|
|
|
calculate_journal_section_size(ic);
|
|
|
|
journal_sections = journal_sectors / ic->journal_section_sectors;
|
|
|
|
if (!journal_sections)
|
|
|
|
journal_sections = 1;
|
|
|
|
|
2021-01-21 15:09:32 +00:00
|
|
|
if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
|
|
|
|
ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
|
|
|
|
get_random_bytes(ic->sb->salt, SALT_SIZE);
|
|
|
|
}
|
|
|
|
|
2018-07-03 18:13:30 +00:00
|
|
|
if (!ic->meta_dev) {
|
2019-11-13 11:48:16 +00:00
|
|
|
if (ic->fix_padding)
|
|
|
|
ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
|
2018-07-03 18:13:30 +00:00
|
|
|
ic->sb->journal_sections = cpu_to_le32(journal_sections);
|
|
|
|
if (!interleave_sectors)
|
|
|
|
interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
|
|
|
|
ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
|
2023-02-07 21:22:08 +00:00
|
|
|
ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
|
|
|
|
ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
|
2018-07-03 18:13:30 +00:00
|
|
|
|
2020-03-22 19:42:24 +00:00
|
|
|
get_provided_data_sectors(ic);
|
2018-07-03 18:13:30 +00:00
|
|
|
if (!ic->provided_data_sectors)
|
|
|
|
return -EINVAL;
|
|
|
|
} else {
|
|
|
|
ic->sb->log2_interleave_sectors = 0;
|
2020-03-22 19:42:24 +00:00
|
|
|
|
|
|
|
get_provided_data_sectors(ic);
|
|
|
|
if (!ic->provided_data_sectors)
|
|
|
|
return -EINVAL;
|
2018-07-03 18:13:30 +00:00
|
|
|
|
|
|
|
try_smaller_buffer:
|
|
|
|
ic->sb->journal_sections = cpu_to_le32(0);
|
|
|
|
for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
|
|
|
|
__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
|
|
|
|
__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2018-07-03 18:13:30 +00:00
|
|
|
if (test_journal_sections > journal_sections)
|
|
|
|
continue;
|
|
|
|
ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
|
|
|
|
if (calculate_device_limits(ic))
|
|
|
|
ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2018-07-03 18:13:30 +00:00
|
|
|
}
|
|
|
|
if (!le32_to_cpu(ic->sb->journal_sections)) {
|
|
|
|
if (ic->log2_buffer_sectors > 3) {
|
|
|
|
ic->log2_buffer_sectors--;
|
|
|
|
goto try_smaller_buffer;
|
|
|
|
}
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
|
|
|
|
|
2018-07-03 18:13:31 +00:00
|
|
|
sb_set_version(ic);
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
|
|
|
|
{
|
|
|
|
struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
|
|
|
|
struct blk_integrity bi;
|
|
|
|
|
|
|
|
memset(&bi, 0, sizeof(bi));
|
|
|
|
bi.profile = &dm_integrity_profile;
|
2017-04-18 20:51:52 +00:00
|
|
|
bi.tuple_size = ic->tag_size;
|
|
|
|
bi.tag_size = bi.tuple_size;
|
2017-04-26 22:39:47 +00:00
|
|
|
bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
blk_integrity_register(disk, &bi);
|
|
|
|
blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
|
|
|
|
}
|
|
|
|
|
2019-04-29 12:57:20 +00:00
|
|
|
static void dm_integrity_free_page_list(struct page_list *pl)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
if (!pl)
|
|
|
|
return;
|
2019-04-29 12:57:20 +00:00
|
|
|
for (i = 0; pl[i].page; i++)
|
|
|
|
__free_page(pl[i].page);
|
2017-01-04 19:23:53 +00:00
|
|
|
kvfree(pl);
|
|
|
|
}
|
|
|
|
|
2023-01-25 20:14:58 +00:00
|
|
|
static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
|
|
|
struct page_list *pl;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
2019-04-29 12:57:20 +00:00
|
|
|
pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
|
2017-01-04 19:23:53 +00:00
|
|
|
if (!pl)
|
|
|
|
return NULL;
|
|
|
|
|
2019-04-29 12:57:20 +00:00
|
|
|
for (i = 0; i < n_pages; i++) {
|
2017-01-04 19:23:53 +00:00
|
|
|
pl[i].page = alloc_page(GFP_KERNEL);
|
|
|
|
if (!pl[i].page) {
|
2019-04-29 12:57:20 +00:00
|
|
|
dm_integrity_free_page_list(pl);
|
2017-01-04 19:23:53 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
if (i)
|
|
|
|
pl[i - 1].next = &pl[i];
|
|
|
|
}
|
2019-04-29 12:57:20 +00:00
|
|
|
pl[i].page = NULL;
|
|
|
|
pl[i].next = NULL;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
|
|
|
return pl;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
|
|
|
|
{
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i;
|
2023-02-01 22:42:29 +00:00
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
for (i = 0; i < ic->journal_sections; i++)
|
|
|
|
kvfree(sl[i]);
|
2018-04-17 22:32:26 +00:00
|
|
|
kvfree(sl);
|
2017-01-04 19:23:53 +00:00
|
|
|
}
|
|
|
|
|
2019-05-09 19:25:49 +00:00
|
|
|
static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
|
|
|
|
struct page_list *pl)
|
2017-01-04 19:23:53 +00:00
|
|
|
{
|
|
|
|
struct scatterlist **sl;
|
2023-01-25 20:14:58 +00:00
|
|
|
unsigned int i;
|
2017-01-04 19:23:53 +00:00
|
|
|
|
	sl = kvmalloc_array(ic->journal_sections,
			    sizeof(struct scatterlist *),
			    GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned int start_index, start_offset;
		unsigned int end_index, end_offset;
		unsigned int n_pages;
		unsigned int idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1,
				   &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
				   GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned int start = 0, end = PAGE_SIZE;

			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}

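/*
 * Zeroize and release a parsed algorithm specification.
 */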
static void free_alg(struct alg_spec *a)
{
	kfree_sensitive(a->alg_string);
	kfree_sensitive(a->key);
	memset(a, 0, sizeof(*a));
}

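/*
 * Parse an "option:algorithm:key" style argument: everything after the
 * first colon is the algorithm name, and an optional second colon
 * introduces a hex-encoded key that is decoded with hex2bin().
 */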
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	free_alg(a);

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;
inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}

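/*
 * Allocate the shash transform for an optional hash/MAC argument and set
 * its key if one was supplied; fail with -ENOKEY if the algorithm requires
 * a key but none was given.
 */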
static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
			*error = error_key;
			return -ENOKEY;
		}
	}

	return 0;
}

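/*
 * Allocate the in-memory journal and, when journal encryption is
 * configured, the journal_io pages used for encrypted journal I/O plus
 * either a keystream page list (journal_xor) for stream ciphers or
 * per-section skcipher requests with derived IVs for block ciphers.
 * Distinct commit IDs and the journal node tree are also set up here.
 */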
static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned int i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
	struct skcipher_request *req = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned int ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

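		/*
		 * A cipher with block size 1 (a stream cipher) is handled by
		 * encrypting a zeroed buffer once and keeping the resulting
		 * keystream in journal_xor; otherwise per-section requests
		 * with derived IVs are prepared in the else branch below.
		 */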
		if (blocksize == 1) {
			struct scatterlist *sg;

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = kvmalloc_array(ic->journal_pages + 1,
					    sizeof(struct scatterlist),
					    GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);

				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));

			skcipher_request_set_crypt(req, sg, sg,
						   PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
			init_completion(&comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			unsigned int crypt_len = roundup(ivsize, blocksize);

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}
			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}

			ic->sk_requests = kvmalloc_array(ic->journal_sections,
							 sizeof(struct skcipher_request *),
							 GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__le32 section_le = cpu_to_le32(i);

				memset(crypt_iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min_t(size_t, crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
				init_completion(&comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc_array(ivsize, 2,
								GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

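	/*
	 * Make sure all N_COMMIT_IDS commit IDs are mutually distinct; the
	 * encryption above may have made some of them collide, so bump a
	 * duplicate and retest until every value is unique.
	 */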
	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned int j;

retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}

/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		sectors_per_bit
 *		bitmap_flush_interval
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		recalculate
 */
static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned int extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 18, "Invalid number of feature args"},
	};
	unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;
	__s8 log2_sectors_per_bitmap_bit = -1;
	__s8 log2_blocks_per_bitmap_bit;
	__u64 bits_in_journal;
	__u64 n_bitmap_bits;

#define DIRECT_ARGUMENTS 4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);
	ic->ti = ti;

	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);
	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
		ic->mode = argv[3][0];
	} else {
		ti->error = "Invalid mode (expecting J, B, D, R)";
		r = -EINVAL;
		goto bad;
	}

	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

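	/*
	 * Parse the optional feature arguments listed in the comment above
	 * dm_integrity_ctr(); an unrecognized option fails the constructor.
	 */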
	while (extra_args--) {
		const char *opt_string;
		unsigned int val;
		unsigned long long llval;

		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
					  dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
			if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
				r = -EINVAL;
				ti->error = "Invalid bitmap_flush_interval argument";
				goto bad;
			}
			ic->bitmap_flush_interval = msecs_to_jiffies(val);
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			ic->recalculate_flag = true;
		} else if (!strcmp(opt_string, "reset_recalculate")) {
			ic->recalculate_flag = true;
			ic->reset_recalculate_flag = true;
		} else if (!strcmp(opt_string, "allow_discards")) {
			ic->discard = true;
		} else if (!strcmp(opt_string, "fix_padding")) {
			ic->fix_padding = true;
		} else if (!strcmp(opt_string, "fix_hmac")) {
			ic->fix_hmac = true;
		} else if (!strcmp(opt_string, "legacy_recalculate")) {
			ic->legacy_recalculate = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

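	/*
	 * Determine the data and metadata device sizes; if no journal size
	 * was specified, default to a fraction of the data device capped at
	 * DEFAULT_MAX_JOURNAL_SECTORS.
	 */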
	ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);

	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	if (ic->mode == 'B' && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Bitmap mode can be only used with internal hash";
		goto bad;
	}

	if (ic->discard && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Discard can be only used with internal hash";
		goto bad;
	}

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue weren't ordered, it would cause bio reordering
	 * and reduced performance.
	 */
dm integrity: Use alloc_ordered_workqueue() to create ordered workqueues
BACKGROUND
==========
When multiple work items are queued to a workqueue, their execution order
doesn't match the queueing order. They may get executed in any order and
simultaneously. When fully serialized execution - one by one in the queueing
order - is needed, an ordered workqueue should be used which can be created
with alloc_ordered_workqueue().
However, alloc_ordered_workqueue() was a later addition. Before it, an
ordered workqueue could be obtained by creating an UNBOUND workqueue with
@max_active==1. This originally was an implementation side-effect which was
broken by 4c16bd327c74 ("workqueue: restore WQ_UNBOUND/max_active==1 to be
ordered"). Because there were users that depended on the ordered execution,
5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered")
made workqueue allocation path to implicitly promote UNBOUND workqueues w/
@max_active==1 to ordered workqueues.
While this has worked okay, overloading the UNBOUND allocation interface
this way creates other issues. It's difficult to tell whether a given
workqueue actually needs to be ordered and users that legitimately want a
min concurrency level wq unexpectedly gets an ordered one instead. With
planned UNBOUND workqueue updates to improve execution locality and more
prevalence of chiplet designs which can benefit from such improvements, this
isn't a state we wanna be in forever.
This patch series audits all callsites that create an UNBOUND workqueue w/
@max_active==1 and converts them to alloc_ordered_workqueue() as necessary.
WHAT TO LOOK FOR
================
The conversions are from
alloc_workqueue(WQ_UNBOUND | flags, 1, args..)
to
alloc_ordered_workqueue(flags, args...)
which don't cause any functional changes. If you know that fully ordered
execution is not necessary, please let me know. I'll drop the conversion and
instead add a comment noting the fact to reduce confusion while conversion
is in progress.
If you aren't fully sure, it's completely fine to let the conversion
through. The behavior will stay exactly the same and we can always
reconsider later.
As there are follow-up workqueue core changes, I'd really appreciate if the
patch can be routed through the workqueue tree w/ your acks. Thanks.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@kernel.org>
Cc: dm-devel@redhat.com
Cc: linux-kernel@vger.kernel.org
2023-05-25 22:15:02 +00:00
|
|
|
ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM);
|
2017-01-04 19:23:53 +00:00
|
|
|
if (!ic->wait_wq) {
|
|
|
|
ti->error = "Cannot allocate workqueue";
|
|
|
|
r = -ENOMEM;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
|
2020-02-17 12:43:03 +00:00
|
|
|
ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
|
|
|
|
METADATA_WORKQUEUE_MAX_ACTIVE);
|
|
|
|
if (!ic->offload_wq) {
|
|
|
|
ti->error = "Cannot allocate workqueue";
|
|
|
|
r = -ENOMEM;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
|
2017-01-04 19:23:53 +00:00
|
|
|
ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
|
|
|
|
if (!ic->commit_wq) {
|
|
|
|
ti->error = "Cannot allocate workqueue";
|
|
|
|
r = -ENOMEM;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
INIT_WORK(&ic->commit_work, integrity_commit);
|
|
|
|
|
2019-04-29 12:57:24 +00:00
|
|
|
if (ic->mode == 'J' || ic->mode == 'B') {
|
2017-01-04 19:23:53 +00:00
|
|
|
ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
|
|
|
|
if (!ic->writer_wq) {
|
|
|
|
ti->error = "Cannot allocate workqueue";
|
|
|
|
r = -ENOMEM;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
INIT_WORK(&ic->writer_work, integrity_writer);
|
|
|
|
}
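
	/*
	 * Read the on-disk superblock (SB_SECTORS at the start of the
	 * metadata area). If the magic is not present, a fresh in-memory
	 * superblock is initialized; unless the target is read-only ('R'),
	 * the area must be all zeroes and the new superblock is written out
	 * later (should_write_sb).
	 */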
	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}
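
	/*
	 * Validate the superblock fields against the table parameters before
	 * anything is derived from them: version, tag size, block size,
	 * journal size and interleaving must all be consistent.
	 */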
	if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

	get_provided_data_sectors(ic);
	if (!ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "The device is too small";
		goto bad;
	}
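
	/*
	 * If the metadata device turns out to be too small for the current
	 * bufio buffer size, retry with progressively smaller buffers (down
	 * to 1 << 3 = 8 sectors) before giving up.
	 */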
try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}
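
	/*
	 * Bitmap mode granularity: each bitmap bit covers
	 * 1 << log2_sectors_per_bitmap_bit sectors (the default comes from
	 * DEFAULT_SECTORS_PER_BITMAP_BIT). The granularity is coarsened
	 * until the whole bitmap fits into the space reserved for the
	 * journal (bits_in_journal).
	 */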
	if (log2_sectors_per_bitmap_bit < 0)
		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;

	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
	if (bits_in_journal > UINT_MAX)
		bits_in_journal = UINT_MAX;
	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
		log2_sectors_per_bitmap_bit++;

	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	if (should_write_sb)
		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;

	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
			 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}
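
	/*
	 * journal_watermark is a percentage: the threshold below is
	 * journal_entries * (100 - journal_watermark) / 100, rounded to the
	 * nearest integer (the "+ 50" before do_div), and is used to decide
	 * when a journal commit should be kicked off early.
	 */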
	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

	DEBUG_print("initialized:\n");
	DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print(" journal_entries %u\n", ic->journal_entries);
	DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
	DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
	DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print(" bits_in_journal %llu\n", bits_in_journal);

	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->internal_hash) {
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
	} else {
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
			ti->error = "Recalculate can only be specified with internal_hash";
			r = -EINVAL;
			goto bad;
		}
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
	    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
	    dm_integrity_disable_recalculate(ic)) {
		ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
		r = -EOPNOTSUPP;
		goto bad;
	}
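
	/*
	 * dm-bufio provides cached access to the tag area. Buffers are
	 * 1 << log2_buffer_sectors sectors, and the client's sector offset
	 * is shifted by ic->start + ic->initial_sectors so that buffer
	 * indices address the metadata directly.
	 */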
	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}
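
	/*
	 * Bitmap mode bookkeeping: two page lists (recalc_bitmap and
	 * may_write_bitmap) plus one bitmap_block_status per on-disk bitmap
	 * block. Each bitmap_block_status carries its own work item, bio
	 * queue and lock, and its ->bitmap pointer aliases into the
	 * ic->journal page list, which holds the bitmap in this mode.
	 */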
	if (ic->mode == 'B') {
		unsigned int i;
		unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned int sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}
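
	/*
	 * A freshly formatted device gets its journal initialized and the
	 * new superblock written with FUA before the target is marked
	 * just_formatted.
	 */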
	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
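
	/*
	 * In bitmap mode, limit ti->max_io_len so that a single bio never
	 * spans more sectors than one bitmap block describes
	 * (BITMAP_BLOCK_SIZE * 8 bits, each covering
	 * 1 << log2_blocks_per_bitmap_bit blocks).
	 */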
	if (ic->mode == 'B') {
		unsigned int max_io_len;

		max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	if (ic->discard)
		ti->num_discard_bios = 1;

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
	return 0;

bad:
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	dm_integrity_dtr(ti);
	return r;
}
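
/*
 * The destructor also runs on the constructor's error path ("bad:" above),
 * so every teardown step must tolerate fields that were never set up -
 * hence the NULL checks before each destroy/free below.
 */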
static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

	if (ic->mode == 'B')
		cancel_delayed_work_sync(&ic->bitmap_flush_work);
	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned int i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req;

			req = ic->sk_requests[i];
			if (req) {
				kfree_sensitive(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}
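
/*
 * Target registration: DM_TARGET_SINGLETON requires this target to be the
 * only one in its table, and DM_TARGET_INTEGRITY flags it to the
 * device-mapper core as an integrity target.
 */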
static struct target_type integrity_target = {
	.name			= "integrity",
	.version		= {1, 10, 0},
	.module			= THIS_MODULE,
	.features		= DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr			= dm_integrity_ctr,
	.dtr			= dm_integrity_dtr,
	.map			= dm_integrity_map,
	.postsuspend		= dm_integrity_postsuspend,
	.resume			= dm_integrity_resume,
	.status			= dm_integrity_status,
	.iterate_devices	= dm_integrity_iterate_devices,
	.io_hints		= dm_integrity_io_hints,
};
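
/*
 * Module init creates the slab cache used by the journal_io mempool and
 * registers the target; both are undone in reverse order on registration
 * failure and at module exit.
 */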
static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);
	if (r < 0) {
		kmem_cache_destroy(journal_io_cache);
		return r;
	}

	return 0;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");