mirror of https://github.com/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
EXPORT_SYMBOL(blk_rq_count_integrity_sg);

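/*
 * Example usage (a minimal sketch; "rq" and "prot_sgl" are hypothetical
 * driver-local names, not part of this file): a low-level driver carrying
 * protection information typically sizes its integrity scatterlist with
 * the count returned above before mapping anything into it:
 *
 *	unsigned int nents = blk_rq_count_integrity_sg(rq->q, rq->bio);
 *	struct scatterlist *prot_sgl;
 *
 *	prot_sgl = kmalloc_array(nents, sizeof(*prot_sgl), GFP_ATOMIC);
 *	if (!prot_sgl)
 *		return BLK_STS_RESOURCE;
 *	sg_init_table(prot_sgl, nents);
 */
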
/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in request into a
 * scatterlist. The scatterlist must be big enough to hold all
 * elements. I.e. sized using blk_rq_count_integrity_sg().
 */
int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
			    struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct scatterlist *sg = NULL;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);

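/*
 * Continuing the sketch above: with "prot_sgl" allocated, the driver maps
 * the integrity vectors into it and hands the resulting segments to the
 * hardware alongside the data scatterlist. The segment count should never
 * exceed the limit the driver registered for the queue:
 *
 *	unsigned int count = blk_rq_map_integrity_sg(rq->q, rq->bio, prot_sgl);
 *
 *	BUG_ON(count > queue_max_integrity_segments(rq->q));
 */
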
/**
 * blk_integrity_compare - Compare integrity profile of two disks
 * @gd1:	Disk to compare
 * @gd2:	Disk to compare
 *
 * Description: Meta-devices like DM and MD need to verify that all
 * sub-devices use the same integrity format before advertising to
 * upper layers that they can send/receive integrity metadata. This
 * function can be used to check whether two gendisk devices have
 * compatible integrity formats.
 */
int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
{
	struct blk_integrity *b1 = &gd1->queue->integrity;
	struct blk_integrity *b2 = &gd2->queue->integrity;

	if (!b1->profile && !b2->profile)
		return 0;

	if (!b1->profile || !b2->profile)
		return -1;

	if (b1->interval_exp != b2->interval_exp) {
		pr_err("%s: %s/%s protection interval %u != %u\n",
		       __func__, gd1->disk_name, gd2->disk_name,
		       1 << b1->interval_exp, 1 << b2->interval_exp);
		return -1;
	}

	if (b1->tuple_size != b2->tuple_size) {
		pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->tuple_size, b2->tuple_size);
		return -1;
	}

	if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
		pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->tag_size, b2->tag_size);
		return -1;
	}

	if (b1->profile != b2->profile) {
		pr_err("%s: %s/%s type %s != %s\n", __func__,
		       gd1->disk_name, gd2->disk_name,
		       b1->profile->name, b2->profile->name);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(blk_integrity_compare);

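/*
 * Example usage (a minimal sketch; "array_disk" and "member_disk" are
 * hypothetical names): a stacking driver assembling an array would compare
 * each member against a reference disk and refuse to advertise integrity
 * support for the whole device if any member disagrees:
 *
 *	if (blk_integrity_compare(array_disk, member_disk) < 0)
 *		return -EINVAL;
 */
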
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}
EXPORT_SYMBOL(blk_integrity_merge_rq);

bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;
	struct bio *next = bio->bi_next;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	bio->bi_next = NULL;
	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	bio->bi_next = next;

	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	req->nr_integrity_segments += nr_integrity_segs;

	return true;
}
EXPORT_SYMBOL(blk_integrity_merge_bio);

struct integrity_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_integrity *, char *);
	ssize_t (*store)(struct blk_integrity *, const char *, size_t);
};

static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
				   char *page)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
	struct blk_integrity *bi = &disk->queue->integrity;
	struct integrity_sysfs_entry *entry =
		container_of(attr, struct integrity_sysfs_entry, attr);

	return entry->show(bi, page);
}

static ssize_t integrity_attr_store(struct kobject *kobj,
				    struct attribute *attr, const char *page,
				    size_t count)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
	struct blk_integrity *bi = &disk->queue->integrity;
	struct integrity_sysfs_entry *entry =
		container_of(attr, struct integrity_sysfs_entry, attr);
	ssize_t ret = 0;

	if (entry->store)
		ret = entry->store(bi, page, count);

	return ret;
}

static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
{
	if (bi->profile && bi->profile->name)
		return sprintf(page, "%s\n", bi->profile->name);
	else
		return sprintf(page, "none\n");
}

static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%u\n", bi->tag_size);
}

static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%u\n",
		       bi->interval_exp ? 1 << bi->interval_exp : 0);
}

static ssize_t integrity_verify_store(struct blk_integrity *bi,
				      const char *page, size_t count)
{
	char *p = (char *) page;
	unsigned long val = simple_strtoul(p, &p, 10);

	if (val)
		bi->flags |= BLK_INTEGRITY_VERIFY;
	else
		bi->flags &= ~BLK_INTEGRITY_VERIFY;

	return count;
}

static ssize_t integrity_verify_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_VERIFY) != 0);
}

static ssize_t integrity_generate_store(struct blk_integrity *bi,
					const char *page, size_t count)
{
	char *p = (char *) page;
	unsigned long val = simple_strtoul(p, &p, 10);

	if (val)
		bi->flags |= BLK_INTEGRITY_GENERATE;
	else
		bi->flags &= ~BLK_INTEGRITY_GENERATE;

	return count;
}

static ssize_t integrity_generate_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_GENERATE) != 0);
}

static ssize_t integrity_device_show(struct blk_integrity *bi, char *page)
{
	return sprintf(page, "%u\n",
		       (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) != 0);
}

static struct integrity_sysfs_entry integrity_format_entry = {
	.attr = { .name = "format", .mode = 0444 },
	.show = integrity_format_show,
};

static struct integrity_sysfs_entry integrity_tag_size_entry = {
	.attr = { .name = "tag_size", .mode = 0444 },
	.show = integrity_tag_size_show,
};

static struct integrity_sysfs_entry integrity_interval_entry = {
	.attr = { .name = "protection_interval_bytes", .mode = 0444 },
	.show = integrity_interval_show,
};

static struct integrity_sysfs_entry integrity_verify_entry = {
	.attr = { .name = "read_verify", .mode = 0644 },
	.show = integrity_verify_show,
	.store = integrity_verify_store,
};

static struct integrity_sysfs_entry integrity_generate_entry = {
	.attr = { .name = "write_generate", .mode = 0644 },
	.show = integrity_generate_show,
	.store = integrity_generate_store,
};

static struct integrity_sysfs_entry integrity_device_entry = {
	.attr = { .name = "device_is_integrity_capable", .mode = 0444 },
	.show = integrity_device_show,
};

static struct attribute *integrity_attrs[] = {
	&integrity_format_entry.attr,
	&integrity_tag_size_entry.attr,
	&integrity_interval_entry.attr,
	&integrity_verify_entry.attr,
	&integrity_generate_entry.attr,
	&integrity_device_entry.attr,
	NULL,
};
ATTRIBUTE_GROUPS(integrity);

static const struct sysfs_ops integrity_ops = {
	.show	= &integrity_attr_show,
	.store	= &integrity_attr_store,
};

static struct kobj_type integrity_ktype = {
	.default_groups = integrity_groups,
	.sysfs_ops	= &integrity_ops,
};

static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
{
	return BLK_STS_OK;
}

static const struct blk_integrity_profile nop_profile = {
	.name = "nop",
	.generate_fn = blk_integrity_nop_fn,
	.verify_fn = blk_integrity_nop_fn,
};

/**
 * blk_integrity_register - Register a gendisk as being integrity-capable
 * @disk:	struct gendisk pointer to make integrity-aware
 * @template:	block integrity profile to register
 *
 * Description: When a device needs to advertise itself as being able to
 * send/receive integrity metadata it must use this function to register
 * the capability with the block layer. The template is a blk_integrity
 * struct with values appropriate for the underlying hardware. See
 * Documentation/block/data-integrity.txt.
 */
void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
		template->flags;
	bi->interval_exp = template->interval_exp ? :
		ilog2(queue_logical_block_size(disk->queue));
	bi->profile = template->profile ? template->profile : &nop_profile;
	bi->tuple_size = template->tuple_size;
	bi->tag_size = template->tag_size;

	disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
}
EXPORT_SYMBOL(blk_integrity_register);

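/*
 * Example usage (a minimal sketch, loosely modelled on how a SCSI disk
 * driver might advertise T10 PI Type 1 protection; t10_pi_type1_crc and
 * struct t10_pi_tuple come from <linux/t10-pi.h>, and "disk" stands for
 * the driver's gendisk):
 *
 *	struct blk_integrity bi = {
 *		.profile	= &t10_pi_type1_crc,
 *		.tuple_size	= sizeof(struct t10_pi_tuple),
 *		.flags		= BLK_INTEGRITY_DEVICE_CAPABLE,
 *	};
 *
 *	blk_integrity_register(disk, &bi);
 *
 * The capability is torn down again with blk_integrity_unregister(disk).
 */
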
/**
 * blk_integrity_unregister - Unregister block integrity profile
 * @disk:	disk whose integrity profile to unregister
 *
 * Description: This function unregisters the integrity capability from
 * a block device.
 */
void blk_integrity_unregister(struct gendisk *disk)
{
	disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
}
EXPORT_SYMBOL(blk_integrity_unregister);

void blk_integrity_add(struct gendisk *disk)
{
	if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
				 &disk_to_dev(disk)->kobj, "%s", "integrity"))
		return;

	kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
}

void blk_integrity_del(struct gendisk *disk)
{
	kobject_uevent(&disk->integrity_kobj, KOBJ_REMOVE);
	kobject_del(&disk->integrity_kobj);
	kobject_put(&disk->integrity_kobj);
}