Mirror of https://github.com/torvalds/linux.git (synced 2024-11-21 19:41:42 +00:00)
Merge patch series "Use block pr_ops in LIO"

Mike Christie <michael.christie@oracle.com> says:

The patches in this thread allow us to use the block pr_ops with LIO's target_core_iblock module to support cluster applications in VMs. They were built over Linus's tree; they also apply over linux-next and Martin's and Jens's trees.

Currently, to use Windows clustering or Linux clustering (pacemaker + ClusterLabs SCSI fence agents) in VMs with LIO and vhost-scsi, you have to use tcmu or pscsi, or use a cluster-aware FS/framework for the LIO PR file. Setting up a cluster FS/framework is a pain and a waste when your real backend device is already a distributed device. pscsi and tcmu are nice for specific use cases, but iblock gives you the best performance and allows you to use stacked devices like dm-multipath. So these patches allow iblock to work like pscsi/tcmu, where a PR command can be passed down to the backend module; iblock then uses the pr_ops to pass the PR command to the real devices, similar to what we do for UNMAP today.

The patches are separated into the following groups:

Patch 1 - 2: Add block layer callouts for reading reservations and rename the reservation error code.
Patch 3 - 5: SCSI support for the new callouts.
Patch 6: DM support for the new callouts.
Patch 7 - 13: NVMe support for the new callouts.
Patch 14 - 18: LIO support for the new callouts.

This patchset has been tested with the libiscsi PGR ops and with the Windows failover cluster verification test.

Note that for SCSI backend devices we need this patchset to handle UAs:
https://lore.kernel.org/linux-scsi/20230123221046.125483-1-michael.christie@oracle.com/T/#m4834a643ffb5bac2529d65d40906d3cfbdd9b1b7
To reduce the size of this patchset, that work is being done separately to make reviewing easier. To make merging easier, this patchset and the one above have no conflicts, so they can go through different trees.

Link: https://lore.kernel.org/r/20230407200551.12660-1-michael.christie@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
This commit is contained in commit 7907ad748b.
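For context, the pr_ops this series plumbs through are the same hooks userspace reaches via the persistent-reservation ioctls in <linux/pr.h>, so a cluster agent sitting on a device whose driver provides pr_ops ultimately drives the same path. A minimal sketch (device path and key are made-up examples; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pr.h>

int main(void)
{
	/* Example device; any block device whose driver provides pr_ops. */
	int fd = open("/dev/sdb", O_RDWR);
	if (fd < 0)
		return 1;

	/* Register our reservation key with the device. */
	struct pr_registration reg = { .old_key = 0, .new_key = 0x123abc };
	if (ioctl(fd, IOC_PR_REGISTER, &reg))
		perror("IOC_PR_REGISTER");

	/* Acquire a write-exclusive reservation using that key. */
	struct pr_reservation rsv = { .key = 0x123abc, .type = PR_WRITE_EXCLUSIVE };
	if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("IOC_PR_RESERVE");

	close(fd);
	return 0;
}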
block/blk-core.c:

@@ -155,7 +155,7 @@ static const struct {
 	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
 	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
 	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
-	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
+	[BLK_STS_RESV_CONFLICT]	= { -EBADE,	"reservation conflict" },
 	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
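Patches 1-2 also extend struct pr_ops with the two read callouts used throughout the diffs below. Their definitions are not part of this hunk; reconstructed from how the DM, SCSI, and NVMe code uses them, the additions to <linux/pr.h> look roughly like this (a sketch, not the verbatim header):

struct pr_keys {
	u32	generation;	/* reservation generation counter */
	u32	num_keys;	/* in: capacity of keys[]; out: keys on device */
	u64	keys[];
};

struct pr_held_reservation {
	u64		key;	/* key of the current reservation holder */
	u32		generation;
	enum pr_type	type;
};

struct pr_ops {
	/* existing callouts: pr_register, pr_reserve, pr_release, ... */
	int (*pr_read_keys)(struct block_device *bdev,
			struct pr_keys *keys_info);
	int (*pr_read_reservation)(struct block_device *bdev,
			struct pr_held_reservation *rsv);
};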
drivers/md/dm.c:

@@ -3132,6 +3132,8 @@ struct dm_pr {
 	bool	fail_early;
 	int	ret;
 	enum pr_type	type;
+	struct pr_keys	*read_keys;
+	struct pr_held_reservation *rsv;
 };
 
 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
@@ -3364,12 +3366,79 @@ out:
 	return r;
 }
 
+static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
+			     sector_t start, sector_t len, void *data)
+{
+	struct dm_pr *pr = data;
+	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+	if (!ops || !ops->pr_read_keys) {
+		pr->ret = -EOPNOTSUPP;
+		return -1;
+	}
+
+	pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
+	if (!pr->ret)
+		return -1;
+
+	return 0;
+}
+
+static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
+{
+	struct dm_pr pr = {
+		.read_keys = keys,
+	};
+	int ret;
+
+	ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
+	if (ret)
+		return ret;
+
+	return pr.ret;
+}
+
+static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
+				    sector_t start, sector_t len, void *data)
+{
+	struct dm_pr *pr = data;
+	const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+	if (!ops || !ops->pr_read_reservation) {
+		pr->ret = -EOPNOTSUPP;
+		return -1;
+	}
+
+	pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
+	if (!pr->ret)
+		return -1;
+
+	return 0;
+}
+
+static int dm_pr_read_reservation(struct block_device *bdev,
+				  struct pr_held_reservation *rsv)
+{
+	struct dm_pr pr = {
+		.rsv = rsv,
+	};
+	int ret;
+
+	ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
+	if (ret)
+		return ret;
+
+	return pr.ret;
+}
+
 static const struct pr_ops dm_pr_ops = {
 	.pr_register	= dm_pr_register,
 	.pr_reserve	= dm_pr_reserve,
 	.pr_release	= dm_pr_release,
 	.pr_preempt	= dm_pr_preempt,
 	.pr_clear	= dm_pr_clear,
+	.pr_read_keys	= dm_pr_read_keys,
+	.pr_read_reservation = dm_pr_read_reservation,
};
 
 static const struct block_device_operations dm_blk_dops = {
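A note on the -1 returns above: dm_call_pr() walks the underlying devices with an iterate_devices callout, and a non-zero return from the callout stops the walk. The read callouts therefore stop at the first device that either lacks pr_ops (reporting -EOPNOTSUPP) or answers successfully, and return 0 (keep iterating) only when a read failed and another path might still succeed; the real status travels back in dm_pr.ret. Condensed, assuming those iterate_devices semantics:

/* Sketch of the callout convention; do_pr_read() is a hypothetical helper. */
static int read_from_first_working_dev(struct dm_target *ti, struct dm_dev *dev,
				       sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;

	pr->ret = do_pr_read(dev->bdev, pr);
	if (!pr->ret)
		return -1;	/* non-zero stops iterate_devices(): done */
	return 0;		/* keep iterating: try the next path */
}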
drivers/nvme/host/Makefile:

@@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_FC)			+= nvme-fc.o
 obj-$(CONFIG_NVME_TCP)			+= nvme-tcp.o
 obj-$(CONFIG_NVME_APPLE)		+= nvme-apple.o
 
-nvme-core-y				+= core.o ioctl.o
+nvme-core-y				+= core.o ioctl.o pr.o
 nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS)	+= constants.o
 nvme-core-$(CONFIG_TRACING)		+= trace.o
 nvme-core-$(CONFIG_NVME_MULTIPATH)	+= multipath.o
drivers/nvme/host/core.c:

@@ -279,7 +279,7 @@ static blk_status_t nvme_error_status(u16 status)
 	case NVME_SC_INVALID_PI:
 		return BLK_STS_PROTECTION;
 	case NVME_SC_RESERVATION_CONFLICT:
-		return BLK_STS_NEXUS;
+		return BLK_STS_RESV_CONFLICT;
 	case NVME_SC_HOST_PATH_ERROR:
 		return BLK_STS_TRANSPORT;
 	case NVME_SC_ZONE_TOO_MANY_ACTIVE:
@@ -2061,153 +2061,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info)
 	}
 }
 
-static char nvme_pr_type(enum pr_type type)
-{
-	switch (type) {
-	case PR_WRITE_EXCLUSIVE:
-		return 1;
-	case PR_EXCLUSIVE_ACCESS:
-		return 2;
-	case PR_WRITE_EXCLUSIVE_REG_ONLY:
-		return 3;
-	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
-		return 4;
-	case PR_WRITE_EXCLUSIVE_ALL_REGS:
-		return 5;
-	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
-		return 6;
-	default:
-		return 0;
-	}
-}
-
-static int nvme_send_ns_head_pr_command(struct block_device *bdev,
-		struct nvme_command *c, u8 data[16])
-{
-	struct nvme_ns_head *head = bdev->bd_disk->private_data;
-	int srcu_idx = srcu_read_lock(&head->srcu);
-	struct nvme_ns *ns = nvme_find_path(head);
-	int ret = -EWOULDBLOCK;
-
-	if (ns) {
-		c->common.nsid = cpu_to_le32(ns->head->ns_id);
-		ret = nvme_submit_sync_cmd(ns->queue, c, data, 16);
-	}
-	srcu_read_unlock(&head->srcu, srcu_idx);
-	return ret;
-}
-
-static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
-		u8 data[16])
-{
-	c->common.nsid = cpu_to_le32(ns->head->ns_id);
-	return nvme_submit_sync_cmd(ns->queue, c, data, 16);
-}
-
-static int nvme_sc_to_pr_err(int nvme_sc)
-{
-	if (nvme_is_path_error(nvme_sc))
-		return PR_STS_PATH_FAILED;
-
-	switch (nvme_sc) {
-	case NVME_SC_SUCCESS:
-		return PR_STS_SUCCESS;
-	case NVME_SC_RESERVATION_CONFLICT:
-		return PR_STS_RESERVATION_CONFLICT;
-	case NVME_SC_ONCS_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case NVME_SC_BAD_ATTRIBUTES:
-	case NVME_SC_INVALID_OPCODE:
-	case NVME_SC_INVALID_FIELD:
-	case NVME_SC_INVALID_NS:
-		return -EINVAL;
-	default:
-		return PR_STS_IOERR;
-	}
-}
-
-static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
-		u64 key, u64 sa_key, u8 op)
-{
-	struct nvme_command c = { };
-	u8 data[16] = { 0, };
-	int ret;
-
-	put_unaligned_le64(key, &data[0]);
-	put_unaligned_le64(sa_key, &data[8]);
-
-	c.common.opcode = op;
-	c.common.cdw10 = cpu_to_le32(cdw10);
-
-	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
-	    bdev->bd_disk->fops == &nvme_ns_head_ops)
-		ret = nvme_send_ns_head_pr_command(bdev, &c, data);
-	else
-		ret = nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c,
-					      data);
-	if (ret < 0)
-		return ret;
-
-	return nvme_sc_to_pr_err(ret);
-}
-
-static int nvme_pr_register(struct block_device *bdev, u64 old,
-		u64 new, unsigned flags)
-{
-	u32 cdw10;
-
-	if (flags & ~PR_FL_IGNORE_KEY)
-		return -EOPNOTSUPP;
-
-	cdw10 = old ? 2 : 0;
-	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
-	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
-	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
-}
-
-static int nvme_pr_reserve(struct block_device *bdev, u64 key,
-		enum pr_type type, unsigned flags)
-{
-	u32 cdw10;
-
-	if (flags & ~PR_FL_IGNORE_KEY)
-		return -EOPNOTSUPP;
-
-	cdw10 = nvme_pr_type(type) << 8;
-	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
-	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
-}
-
-static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
-		enum pr_type type, bool abort)
-{
-	u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
-
-	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
-}
-
-static int nvme_pr_clear(struct block_device *bdev, u64 key)
-{
-	u32 cdw10 = 1 | (key ? 0 : 1 << 3);
-
-	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
-}
-
-static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
-{
-	u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
-
-	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
-}
-
-const struct pr_ops nvme_pr_ops = {
-	.pr_register	= nvme_pr_register,
-	.pr_reserve	= nvme_pr_reserve,
-	.pr_release	= nvme_pr_release,
-	.pr_preempt	= nvme_pr_preempt,
-	.pr_clear	= nvme_pr_clear,
-};
-
 #ifdef CONFIG_BLK_SED_OPAL
 static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
 		bool send)
drivers/nvme/host/nvme.h:

@@ -19,6 +19,8 @@
 
 #include <trace/events/block.h>
 
+extern const struct pr_ops nvme_pr_ops;
+
 extern unsigned int nvme_io_timeout;
 #define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)
drivers/nvme/host/pr.c (new file, 315 lines):

@@ -0,0 +1,315 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Intel Corporation
 *	Keith Busch <kbusch@kernel.org>
 */
#include <linux/blkdev.h>
#include <linux/pr.h>
#include <asm/unaligned.h>

#include "nvme.h"

static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return NVME_PR_WRITE_EXCLUSIVE;
	case PR_EXCLUSIVE_ACCESS:
		return NVME_PR_EXCLUSIVE_ACCESS;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
{
	switch (type) {
	case NVME_PR_WRITE_EXCLUSIVE:
		return PR_WRITE_EXCLUSIVE;
	case NVME_PR_EXCLUSIVE_ACCESS:
		return PR_EXCLUSIVE_ACCESS;
	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
		return PR_WRITE_EXCLUSIVE_REG_ONLY;
	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
		return PR_WRITE_EXCLUSIVE_ALL_REGS;
	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}

	return 0;
}

static int nvme_send_ns_head_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	struct nvme_ns_head *head = bdev->bd_disk->private_data;
	int srcu_idx = srcu_read_lock(&head->srcu);
	struct nvme_ns *ns = nvme_find_path(head);
	int ret = -EWOULDBLOCK;

	if (ns) {
		c->common.nsid = cpu_to_le32(ns->head->ns_id);
		ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len);
	}
	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c,
		void *data, unsigned int data_len)
{
	c->common.nsid = cpu_to_le32(ns->head->ns_id);
	return nvme_submit_sync_cmd(ns->queue, c, data, data_len);
}

static int nvme_sc_to_pr_err(int nvme_sc)
{
	if (nvme_is_path_error(nvme_sc))
		return PR_STS_PATH_FAILED;

	switch (nvme_sc) {
	case NVME_SC_SUCCESS:
		return PR_STS_SUCCESS;
	case NVME_SC_RESERVATION_CONFLICT:
		return PR_STS_RESERVATION_CONFLICT;
	case NVME_SC_ONCS_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case NVME_SC_BAD_ATTRIBUTES:
	case NVME_SC_INVALID_OPCODE:
	case NVME_SC_INVALID_FIELD:
	case NVME_SC_INVALID_NS:
		return -EINVAL;
	default:
		return PR_STS_IOERR;
	}
}

static int nvme_send_pr_command(struct block_device *bdev,
		struct nvme_command *c, void *data, unsigned int data_len)
{
	if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
	    bdev->bd_disk->fops == &nvme_ns_head_ops)
		return nvme_send_ns_head_pr_command(bdev, c, data, data_len);

	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
				       data_len);
}

static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
		u64 key, u64 sa_key, u8 op)
{
	struct nvme_command c = { };
	u8 data[16] = { 0, };
	int ret;

	put_unaligned_le64(key, &data[0]);
	put_unaligned_le64(sa_key, &data[8]);

	c.common.opcode = op;
	c.common.cdw10 = cpu_to_le32(cdw10);

	ret = nvme_send_pr_command(bdev, &c, data, sizeof(data));
	if (ret < 0)
		return ret;

	return nvme_sc_to_pr_err(ret);
}

static int nvme_pr_register(struct block_device *bdev, u64 old,
		u64 new, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = old ? 2 : 0;
	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
}

static int nvme_pr_reserve(struct block_device *bdev, u64 key,
		enum pr_type type, unsigned flags)
{
	u32 cdw10;

	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;

	cdw10 = nvme_pr_type_from_blk(type) << 8;
	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
}

static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
		enum pr_type type, bool abort)
{
	u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1);

	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
}

static int nvme_pr_clear(struct block_device *bdev, u64 key)
{
	u32 cdw10 = 1 | (key ? 0 : 1 << 3);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3);

	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
}

static int nvme_pr_resv_report(struct block_device *bdev, void *data,
		u32 data_len, bool *eds)
{
	struct nvme_command c = { };
	int ret;

	c.common.opcode = nvme_cmd_resv_report;
	c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len));
	c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT);
	*eds = true;

retry:
	ret = nvme_send_pr_command(bdev, &c, data, data_len);
	if (ret == NVME_SC_HOST_ID_INCONSIST &&
	    c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) {
		c.common.cdw11 = 0;
		*eds = false;
		goto retry;
	}

	if (ret < 0)
		return ret;

	return nvme_sc_to_pr_err(ret);
}

static int nvme_pr_read_keys(struct block_device *bdev,
		struct pr_keys *keys_info)
{
	u32 rse_len, num_keys = keys_info->num_keys;
	struct nvme_reservation_status_ext *rse;
	int ret, i;
	bool eds;

	/*
	 * Assume we are using 128-bit host IDs and allocate a buffer large
	 * enough to get enough keys to fill the return keys buffer.
	 */
	rse_len = struct_size(rse, regctl_eds, num_keys);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	keys_info->generation = le32_to_cpu(rse->gen);
	keys_info->num_keys = get_unaligned_le16(&rse->regctl);

	num_keys = min(num_keys, keys_info->num_keys);
	for (i = 0; i < num_keys; i++) {
		if (eds) {
			keys_info->keys[i] =
					le64_to_cpu(rse->regctl_eds[i].rkey);
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey);
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

static int nvme_pr_read_reservation(struct block_device *bdev,
		struct pr_held_reservation *resv)
{
	struct nvme_reservation_status_ext tmp_rse, *rse;
	int ret, i, num_regs;
	u32 rse_len;
	bool eds;

get_num_regs:
	/*
	 * Get the number of registrations so we know how big to allocate
	 * the response buffer.
	 */
	ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
	if (ret)
		return ret;

	num_regs = get_unaligned_le16(&tmp_rse.regctl);
	if (!num_regs) {
		resv->generation = le32_to_cpu(tmp_rse.gen);
		return 0;
	}

	rse_len = struct_size(rse, regctl_eds, num_regs);
	rse = kzalloc(rse_len, GFP_KERNEL);
	if (!rse)
		return -ENOMEM;

	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
	if (ret)
		goto free_rse;

	if (num_regs != get_unaligned_le16(&rse->regctl)) {
		kfree(rse);
		goto get_num_regs;
	}

	resv->generation = le32_to_cpu(rse->gen);
	resv->type = block_pr_type_from_nvme(rse->rtype);

	for (i = 0; i < num_regs; i++) {
		if (eds) {
			if (rse->regctl_eds[i].rcsts) {
				resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
				break;
			}
		} else {
			struct nvme_reservation_status *rs;

			rs = (struct nvme_reservation_status *)rse;
			if (rs->regctl_ds[i].rcsts) {
				resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
				break;
			}
		}
	}

free_rse:
	kfree(rse);
	return ret;
}

const struct pr_ops nvme_pr_ops = {
	.pr_register = nvme_pr_register,
	.pr_reserve = nvme_pr_reserve,
	.pr_release = nvme_pr_release,
	.pr_preempt = nvme_pr_preempt,
	.pr_clear = nvme_pr_clear,
	.pr_read_keys = nvme_pr_read_keys,
	.pr_read_reservation = nvme_pr_read_reservation,
};
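Two details of pr.c worth noting. First, nvme_pr_resv_report() asks for the extended data structure (128-bit host IDs) and falls back to the short format when the controller answers NVME_SC_HOST_ID_INCONSIST. Second, the value placed in cdw10 is the buffer size as a zero-based dword count (the NUMD field of Reservation Report), which is what the nvme_bytes_to_numd() helper computes; its definition in include/linux/nvme.h is just:

static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;	/* e.g. a 64-byte buffer yields NUMD = 15 */
}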
drivers/s390/block/dasd.c:

@@ -2737,7 +2737,12 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 	else if (status == 0) {
 		switch (cqr->intrc) {
 		case -EPERM:
-			error = BLK_STS_NEXUS;
+			/*
+			 * DASD doesn't implement SCSI/NVMe reservations, but it
+			 * implements a locking scheme similar to them. We
+			 * return this error when we no longer have the lock.
+			 */
+			error = BLK_STS_RESV_CONFLICT;
 			break;
 		case -ENOLINK:
 			error = BLK_STS_TRANSPORT;
drivers/scsi/scsi_common.c:

@@ -8,6 +8,7 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/module.h>
+#include <uapi/linux/pr.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi_common.h>
 
@@ -63,6 +64,48 @@ const char *scsi_device_type(unsigned type)
 }
 EXPORT_SYMBOL(scsi_device_type);
 
+enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type)
+{
+	switch (type) {
+	case SCSI_PR_WRITE_EXCLUSIVE:
+		return PR_WRITE_EXCLUSIVE;
+	case SCSI_PR_EXCLUSIVE_ACCESS:
+		return PR_EXCLUSIVE_ACCESS;
+	case SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY:
+		return PR_WRITE_EXCLUSIVE_REG_ONLY;
+	case SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY:
+		return PR_EXCLUSIVE_ACCESS_REG_ONLY;
+	case SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS:
+		return PR_WRITE_EXCLUSIVE_ALL_REGS;
+	case SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS:
+		return PR_EXCLUSIVE_ACCESS_ALL_REGS;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(scsi_pr_type_to_block);
+
+enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type)
+{
+	switch (type) {
+	case PR_WRITE_EXCLUSIVE:
+		return SCSI_PR_WRITE_EXCLUSIVE;
+	case PR_EXCLUSIVE_ACCESS:
+		return SCSI_PR_EXCLUSIVE_ACCESS;
+	case PR_WRITE_EXCLUSIVE_REG_ONLY:
+		return SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY;
+	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+		return SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY;
+	case PR_WRITE_EXCLUSIVE_ALL_REGS:
+		return SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS;
+	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+		return SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(block_pr_type_to_scsi);
+
 /**
  * scsilun_to_int - convert a scsi_lun to an int
  * @scsilun:	struct scsi_lun to be converted.
drivers/scsi/scsi_lib.c:

@@ -599,7 +599,7 @@ static blk_status_t scsi_result_to_blk_status(int result)
 	case SCSIML_STAT_OK:
 		break;
 	case SCSIML_STAT_RESV_CONFLICT:
-		return BLK_STS_NEXUS;
+		return BLK_STS_RESV_CONFLICT;
 	case SCSIML_STAT_NOSPC:
 		return BLK_STS_NOSPC;
 	case SCSIML_STAT_MED_ERROR:
drivers/scsi/sd.c:

@@ -67,6 +67,7 @@
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsicam.h>
+#include <scsi/scsi_common.h>
 
 #include "sd.h"
 #include "scsi_priv.h"
@@ -1691,26 +1692,6 @@ out_unlock:
 	return ret;
 }
 
-static char sd_pr_type(enum pr_type type)
-{
-	switch (type) {
-	case PR_WRITE_EXCLUSIVE:
-		return 0x01;
-	case PR_EXCLUSIVE_ACCESS:
-		return 0x03;
-	case PR_WRITE_EXCLUSIVE_REG_ONLY:
-		return 0x05;
-	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
-		return 0x06;
-	case PR_WRITE_EXCLUSIVE_ALL_REGS:
-		return 0x07;
-	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
-		return 0x08;
-	default:
-		return 0;
-	}
-};
-
 static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
 {
 	switch (host_byte(result)) {
@@ -1741,8 +1722,97 @@ static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
 	}
 }
 
-static int sd_pr_command(struct block_device *bdev, u8 sa,
-		u64 key, u64 sa_key, u8 type, u8 flags)
+static int sd_pr_in_command(struct block_device *bdev, u8 sa,
+			    unsigned char *data, int data_len)
+{
+	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+	struct scsi_device *sdev = sdkp->device;
+	struct scsi_sense_hdr sshdr;
+	u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa };
+	const struct scsi_exec_args exec_args = {
+		.sshdr = &sshdr,
+	};
+	int result;
+
+	put_unaligned_be16(data_len, &cmd[7]);
+
+	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len,
+				  SD_TIMEOUT, sdkp->max_retries, &exec_args);
+	if (scsi_status_is_check_condition(result) &&
+	    scsi_sense_valid(&sshdr)) {
+		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
+		scsi_print_sense_hdr(sdev, NULL, &sshdr);
+	}
+
+	if (result <= 0)
+		return result;
+
+	return sd_scsi_to_pr_err(&sshdr, result);
+}
+
+static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info)
+{
+	int result, i, data_offset, num_copy_keys;
+	u32 num_keys = keys_info->num_keys;
+	int data_len = num_keys * 8 + 8;
+	u8 *data;
+
+	data = kzalloc(data_len, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	result = sd_pr_in_command(bdev, READ_KEYS, data, data_len);
+	if (result)
+		goto free_data;
+
+	keys_info->generation = get_unaligned_be32(&data[0]);
+	keys_info->num_keys = get_unaligned_be32(&data[4]) / 8;
+
+	data_offset = 8;
+	num_copy_keys = min(num_keys, keys_info->num_keys);
+
+	for (i = 0; i < num_copy_keys; i++) {
+		keys_info->keys[i] = get_unaligned_be64(&data[data_offset]);
+		data_offset += 8;
+	}
+
+free_data:
+	kfree(data);
+	return result;
+}
+
+static int sd_pr_read_reservation(struct block_device *bdev,
+				  struct pr_held_reservation *rsv)
+{
+	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+	struct scsi_device *sdev = sdkp->device;
+	u8 data[24] = { };
+	int result, len;
+
+	result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data));
+	if (result)
+		return result;
+
+	len = get_unaligned_be32(&data[4]);
+	if (!len)
+		return 0;
+
+	/* Make sure we have at least the key and type */
+	if (len < 14) {
+		sdev_printk(KERN_INFO, sdev,
+			    "READ RESERVATION failed due to short return buffer of %d bytes\n",
+			    len);
+		return -EINVAL;
+	}
+
+	rsv->generation = get_unaligned_be32(&data[0]);
+	rsv->key = get_unaligned_be64(&data[8]);
+	rsv->type = scsi_pr_type_to_block(data[21] & 0x0f);
+	return 0;
+}
+
+static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key,
+			     u64 sa_key, enum scsi_pr_type type, u8 flags)
 {
 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
 	struct scsi_device *sdev = sdkp->device;
@@ -1784,7 +1854,7 @@ static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
 {
 	if (flags & ~PR_FL_IGNORE_KEY)
 		return -EOPNOTSUPP;
-	return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
+	return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
 			old_key, new_key, 0,
 			(1 << 0) /* APTPL */);
 }
@@ -1794,24 +1864,26 @@ static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 {
 	if (flags)
 		return -EOPNOTSUPP;
-	return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
+	return sd_pr_out_command(bdev, 0x01, key, 0,
+				 block_pr_type_to_scsi(type), 0);
 }
 
 static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 {
-	return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
+	return sd_pr_out_command(bdev, 0x02, key, 0,
+				 block_pr_type_to_scsi(type), 0);
 }
 
 static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 		enum pr_type type, bool abort)
 {
-	return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
-			sd_pr_type(type), 0);
+	return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
+				 block_pr_type_to_scsi(type), 0);
 }
 
 static int sd_pr_clear(struct block_device *bdev, u64 key)
 {
-	return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
+	return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0);
 }
 
 static const struct pr_ops sd_pr_ops = {
@@ -1820,6 +1892,8 @@ static const struct pr_ops sd_pr_ops = {
 	.pr_release	= sd_pr_release,
 	.pr_preempt	= sd_pr_preempt,
 	.pr_clear	= sd_pr_clear,
+	.pr_read_keys	= sd_pr_read_keys,
+	.pr_read_reservation = sd_pr_read_reservation,
 };
 
 static void scsi_disk_free_disk(struct gendisk *disk)
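For reference, the byte offsets that sd_pr_read_reservation() pulls apart come from the PERSISTENT RESERVE IN (READ RESERVATION) parameter data defined in SPC-4: PRGENERATION in bytes 0-3, additional length in bytes 4-7 (zero when no reservation is held), the holder's key in bytes 8-15, and the scope/type byte at offset 21 with the type in the low nibble. The len < 14 check guards exactly the bytes past the 8-byte header needed to reach the key and the type. As an illustrative (not kernel) struct layout:

/* Illustration of the READ RESERVATION parameter data consumed above. */
struct pr_in_read_reservation {
	__be32	generation;	/* bytes 0-3:   PRGENERATION */
	__be32	add_len;	/* bytes 4-7:   0 if no reservation is held */
	__be64	key;		/* bytes 8-15:  reservation key */
	__be32	obsolete;	/* bytes 16-19 */
	__u8	reserved;	/* byte 20 */
	__u8	scope_type;	/* byte 21: scope (hi nibble) | type (lo) */
	__be16	obsolete2;	/* bytes 22-23 */
} __attribute__((packed));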
drivers/target/target_core_file.c:

@@ -896,7 +896,7 @@ static void fd_free_prot(struct se_device *dev)
 	fd_dev->fd_prot_file = NULL;
 }
 
-static struct sbc_ops fd_sbc_ops = {
+static struct exec_cmd_ops fd_exec_cmd_ops = {
 	.execute_rw = fd_execute_rw,
 	.execute_sync_cache = fd_execute_sync_cache,
 	.execute_write_same = fd_execute_write_same,
@@ -906,7 +906,7 @@ static struct sbc_ops fd_sbc_ops = {
 static sense_reason_t
 fd_parse_cdb(struct se_cmd *cmd)
 {
-	return sbc_parse_cdb(cmd, &fd_sbc_ops);
+	return sbc_parse_cdb(cmd, &fd_exec_cmd_ops);
 }
 
 static const struct target_backend_ops fileio_ops = {
drivers/target/target_core_iblock.c:

@@ -23,13 +23,16 @@
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
+#include <linux/pr.h>
 #include <scsi/scsi_proto.h>
+#include <scsi/scsi_common.h>
 #include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 
 #include "target_core_iblock.h"
+#include "target_core_pr.h"
 
 #define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
 #define IBLOCK_BIO_POOL_SIZE	128
@@ -310,7 +313,7 @@ static sector_t iblock_get_blocks(struct se_device *dev)
 	return blocks_long;
 }
 
-static void iblock_complete_cmd(struct se_cmd *cmd)
+static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status)
 {
 	struct iblock_req *ibr = cmd->priv;
 	u8 status;
@@ -318,7 +321,9 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
 	if (!refcount_dec_and_test(&ibr->pending))
 		return;
 
-	if (atomic_read(&ibr->ib_bio_err_cnt))
+	if (blk_status == BLK_STS_RESV_CONFLICT)
+		status = SAM_STAT_RESERVATION_CONFLICT;
+	else if (atomic_read(&ibr->ib_bio_err_cnt))
 		status = SAM_STAT_CHECK_CONDITION;
 	else
 		status = SAM_STAT_GOOD;
@@ -331,6 +336,7 @@ static void iblock_bio_done(struct bio *bio)
 {
 	struct se_cmd *cmd = bio->bi_private;
 	struct iblock_req *ibr = cmd->priv;
+	blk_status_t blk_status = bio->bi_status;
 
 	if (bio->bi_status) {
 		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
@@ -343,7 +349,7 @@ static void iblock_bio_done(struct bio *bio)
 
 	bio_put(bio);
 
-	iblock_complete_cmd(cmd);
+	iblock_complete_cmd(cmd, blk_status);
 }
 
 static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
@@ -759,7 +765,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 
 	if (!sgl_nents) {
 		refcount_set(&ibr->pending, 1);
-		iblock_complete_cmd(cmd);
+		iblock_complete_cmd(cmd, BLK_STS_OK);
 		return 0;
 	}
@@ -817,7 +823,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 	}
 
 	iblock_submit_bios(&list);
-	iblock_complete_cmd(cmd);
+	iblock_complete_cmd(cmd, BLK_STS_OK);
 	return 0;
 
 fail_put_bios:
@@ -829,6 +835,258 @@ fail:
 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
+static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
+					    u64 sa_key, u8 type, bool aptpl)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bdev = ib_dev->ibd_bd;
+	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+	int ret;
+
+	if (!ops) {
+		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	switch (sa) {
+	case PRO_REGISTER:
+	case PRO_REGISTER_AND_IGNORE_EXISTING_KEY:
+		if (!ops->pr_register) {
+			pr_err("block device does not support pr_register.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		/* The block layer pr ops always enables aptpl */
+		if (!aptpl)
+			pr_info("APTPL not set by initiator, but will be used.\n");
+
+		ret = ops->pr_register(bdev, key, sa_key,
+				sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY);
+		break;
+	case PRO_RESERVE:
+		if (!ops->pr_reserve) {
+			pr_err("block_device does not support pr_reserve.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0);
+		break;
+	case PRO_CLEAR:
+		if (!ops->pr_clear) {
+			pr_err("block_device does not support pr_clear.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		ret = ops->pr_clear(bdev, key);
+		break;
+	case PRO_PREEMPT:
+	case PRO_PREEMPT_AND_ABORT:
+		if (!ops->pr_preempt) {
+			pr_err("block_device does not support pr_preempt.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		ret = ops->pr_preempt(bdev, key, sa_key,
+				      scsi_pr_type_to_block(type),
+				      sa == PRO_PREEMPT ? false : true);
+		break;
+	case PRO_RELEASE:
+		if (!ops->pr_release) {
+			pr_err("block_device does not support pr_release.\n");
+			return TCM_UNSUPPORTED_SCSI_OPCODE;
+		}
+
+		ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type));
+		break;
+	default:
+		pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa);
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (!ret)
+		return TCM_NO_SENSE;
+	else if (ret == PR_STS_RESERVATION_CONFLICT)
+		return TCM_RESERVATION_CONFLICT;
+	else
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
+static void iblock_pr_report_caps(unsigned char *param_data)
+{
+	u16 len = 8;
+
+	put_unaligned_be16(len, &param_data[0]);
+	/*
+	 * When using the pr_ops passthrough method we only support exporting
+	 * the device through one target port because from the backend module
+	 * level we can't see the target port config. As a result we only
+	 * support registration directly from the I_T nexus the cmd is sent
+	 * through and do not set ATP_C here.
+	 *
+	 * The block layer pr_ops do not support passing in initiators so
+	 * we don't set SIP_C here.
+	 */
+	/* PTPL_C: Persistence across Target Power Loss bit */
+	param_data[2] |= 0x01;
+	/*
+	 * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so
+	 * set the TMV: Task Mask Valid bit.
+	 */
+	param_data[3] |= 0x80;
+	/*
+	 * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166
+	 */
+	param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */
+	/*
+	 * PTPL_A: Persistence across Target Power Loss Active bit. The block
+	 * layer pr ops always enables this so report it active.
+	 */
+	param_data[3] |= 0x01;
+	/*
+	 * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
+	 */
+	param_data[4] |= 0x80; /* PR_TYPE_WRITE_EXCLUSIVE_ALLREG */
+	param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */
+	param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */
+	param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */
+	param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
+	param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+}
+
+static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd,
+		unsigned char *param_data)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bdev = ib_dev->ibd_bd;
+	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+	int i, len, paths, data_offset;
+	struct pr_keys *keys;
+	sense_reason_t ret;
+
+	if (!ops) {
+		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (!ops->pr_read_keys) {
+		pr_err("Block device does not support read_keys.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	/*
+	 * We don't know what's under us, but dm-multipath will register every
+	 * path with the same key, so start off with enough space for 16 paths,
+	 * which is not a lot of memory and should normally be enough.
+	 */
+	paths = 16;
+retry:
+	len = 8 * paths;
+	keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL);
+	if (!keys)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	keys->num_keys = paths;
+	if (!ops->pr_read_keys(bdev, keys)) {
+		if (keys->num_keys > paths) {
+			kfree(keys);
+			paths *= 2;
+			goto retry;
+		}
+	} else {
+		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		goto free_keys;
+	}
+
+	ret = TCM_NO_SENSE;
+
+	put_unaligned_be32(keys->generation, &param_data[0]);
+	if (!keys->num_keys) {
+		put_unaligned_be32(0, &param_data[4]);
+		goto free_keys;
+	}
+
+	put_unaligned_be32(8 * keys->num_keys, &param_data[4]);
+
+	data_offset = 8;
+	for (i = 0; i < keys->num_keys; i++) {
+		if (data_offset + 8 > cmd->data_length)
+			break;
+
+		put_unaligned_be64(keys->keys[i], &param_data[data_offset]);
+		data_offset += 8;
+	}
+
+free_keys:
+	kfree(keys);
+	return ret;
+}
+
+static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd,
+		unsigned char *param_data)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+	struct block_device *bdev = ib_dev->ibd_bd;
+	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+	struct pr_held_reservation rsv = { };
+
+	if (!ops) {
+		pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (!ops->pr_read_reservation) {
+		pr_err("Block device does not support pr_read_reservation.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (ops->pr_read_reservation(bdev, &rsv))
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	put_unaligned_be32(rsv.generation, &param_data[0]);
+	if (!block_pr_type_to_scsi(rsv.type)) {
+		put_unaligned_be32(0, &param_data[4]);
+		return TCM_NO_SENSE;
+	}
+
+	put_unaligned_be32(16, &param_data[4]);
+
+	if (cmd->data_length < 16)
+		return TCM_NO_SENSE;
+	put_unaligned_be64(rsv.key, &param_data[8]);
+
+	if (cmd->data_length < 22)
+		return TCM_NO_SENSE;
+	param_data[21] = block_pr_type_to_scsi(rsv.type);
+
+	return TCM_NO_SENSE;
+}
+
+static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa,
+		unsigned char *param_data)
+{
+	sense_reason_t ret = TCM_NO_SENSE;
+
+	switch (sa) {
+	case PRI_REPORT_CAPABILITIES:
+		iblock_pr_report_caps(param_data);
+		break;
+	case PRI_READ_KEYS:
+		ret = iblock_pr_read_keys(cmd, param_data);
+		break;
+	case PRI_READ_RESERVATION:
+		ret = iblock_pr_read_reservation(cmd, param_data);
+		break;
+	default:
+		pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa);
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	return ret;
+}
+
 static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
 {
 	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -869,17 +1127,19 @@ static unsigned int iblock_get_io_opt(struct se_device *dev)
 	return bdev_io_opt(bd);
 }
 
-static struct sbc_ops iblock_sbc_ops = {
+static struct exec_cmd_ops iblock_exec_cmd_ops = {
 	.execute_rw = iblock_execute_rw,
 	.execute_sync_cache = iblock_execute_sync_cache,
 	.execute_write_same = iblock_execute_write_same,
 	.execute_unmap = iblock_execute_unmap,
+	.execute_pr_out = iblock_execute_pr_out,
+	.execute_pr_in = iblock_execute_pr_in,
 };
 
 static sense_reason_t
 iblock_parse_cdb(struct se_cmd *cmd)
 {
-	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
+	return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops);
 }
 
 static bool iblock_get_write_cache(struct se_device *dev)
@@ -890,6 +1150,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
 static const struct target_backend_ops iblock_ops = {
 	.name = "iblock",
 	.inquiry_prod = "IBLOCK",
+	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR,
 	.inquiry_rev = IBLOCK_VERSION,
 	.owner = THIS_MODULE,
 	.attach_hba = iblock_attach_hba,
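Worked out, the REPORT CAPABILITIES payload that iblock_pr_report_caps() assembles is fixed for this backend: byte 2 = 0x01 (PTPL_C), byte 3 = 0x91 (TMV | ALLOW COMMANDS 001b | PTPL_A), and a type mask of 0xea in byte 4 plus 0x01 in byte 5, covering the six supported reservation types. A sketch of the equivalent constant payload (illustration only, not how the kernel writes it):

/* The equivalent 8-byte REPORT CAPABILITIES payload, as constants. */
static const unsigned char iblock_pr_caps_example[8] = {
	0x00, 0x08,	/* LENGTH = 8 */
	0x01,		/* PTPL_C */
	0x91,		/* TMV | ALLOW COMMANDS 001b | PTPL_A */
	0xea,		/* WR_EX, EX_AC, WR_EX_RO, EX_AC_RO, WR_EX_AR */
	0x01,		/* EX_AC_AR */
	0x00, 0x00,
};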
drivers/target/target_core_pr.c:

@@ -3538,6 +3538,37 @@ out_put_pr_reg:
 	return ret;
 }
 
+static sense_reason_t
+target_try_pr_out_pt(struct se_cmd *cmd, u8 sa, u64 res_key, u64 sa_res_key,
+		     u8 type, bool aptpl, bool all_tg_pt, bool spec_i_pt)
+{
+	struct exec_cmd_ops *ops = cmd->protocol_data;
+
+	if (!cmd->se_sess || !cmd->se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	if (!ops->execute_pr_out) {
+		pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	switch (sa) {
+	case PRO_REGISTER_AND_MOVE:
+	case PRO_REPLACE_LOST_RESERVATION:
+		pr_err("SPC-3 PR: PRO_REGISTER_AND_MOVE and PRO_REPLACE_LOST_RESERVATION are not supported by PR passthrough.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (spec_i_pt || all_tg_pt) {
+		pr_err("SPC-3 PR: SPEC_I_PT and ALL_TG_PT are not supported by PR passthrough.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	return ops->execute_pr_out(cmd, sa, res_key, sa_res_key, type, aptpl);
+}
+
 /*
  * See spc4r17 section 6.14 Table 170
 */
@@ -3641,6 +3672,12 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
 		return TCM_PARAMETER_LIST_LENGTH_ERROR;
 	}
 
+	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+		ret = target_try_pr_out_pt(cmd, sa, res_key, sa_res_key, type,
+					   aptpl, all_tg_pt, spec_i_pt);
+		goto done;
+	}
+
 	/*
 	 * (core_scsi3_emulate_pro_* function parameters
 	 * are defined by spc4r17 Table 174:
@@ -3682,6 +3719,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
 		return TCM_INVALID_CDB_FIELD;
 	}
 
+done:
 	if (!ret)
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return ret;
@@ -4039,9 +4077,42 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
 	return 0;
 }
 
+static sense_reason_t target_try_pr_in_pt(struct se_cmd *cmd, u8 sa)
+{
+	struct exec_cmd_ops *ops = cmd->protocol_data;
+	unsigned char *buf;
+	sense_reason_t ret;
+
+	if (cmd->data_length < 8) {
+		pr_err("PRIN SA SCSI Data Length: %u too small\n",
+		       cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	if (!ops->execute_pr_in) {
+		pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	if (sa == PRI_READ_FULL_STATUS) {
+		pr_err("SPC-3 PR: PRI_READ_FULL_STATUS is not supported by PR passthrough.\n");
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	ret = ops->execute_pr_in(cmd, sa, buf);
+
+	transport_kunmap_data_sg(cmd);
+	return ret;
+}
+
 sense_reason_t
 target_scsi3_emulate_pr_in(struct se_cmd *cmd)
 {
+	u8 sa = cmd->t_task_cdb[1] & 0x1f;
 	sense_reason_t ret;
 
 	/*
@@ -4060,7 +4131,12 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
 		return TCM_RESERVATION_CONFLICT;
 	}
 
-	switch (cmd->t_task_cdb[1] & 0x1f) {
+	if (cmd->se_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) {
+		ret = target_try_pr_in_pt(cmd, sa);
+		goto done;
+	}
+
+	switch (sa) {
 	case PRI_READ_KEYS:
 		ret = core_scsi3_pri_read_keys(cmd);
 		break;
@@ -4079,6 +4155,7 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd)
 		return TCM_INVALID_CDB_FIELD;
 	}
 
+done:
 	if (!ret)
 		target_complete_cmd(cmd, SAM_STAT_GOOD);
 	return ret;
drivers/target/target_core_rd.c:

@@ -643,14 +643,14 @@ static void rd_free_prot(struct se_device *dev)
 	rd_release_prot_space(rd_dev);
 }
 
-static struct sbc_ops rd_sbc_ops = {
+static struct exec_cmd_ops rd_exec_cmd_ops = {
 	.execute_rw = rd_execute_rw,
 };
 
 static sense_reason_t
 rd_parse_cdb(struct se_cmd *cmd)
 {
-	return sbc_parse_cdb(cmd, &rd_sbc_ops);
+	return sbc_parse_cdb(cmd, &rd_exec_cmd_ops);
 }
 
 static const struct target_backend_ops rd_mcp_ops = {
drivers/target/target_core_sbc.c:

@@ -192,7 +192,7 @@ EXPORT_SYMBOL(sbc_get_write_same_sectors);
 static sense_reason_t
 sbc_execute_write_same_unmap(struct se_cmd *cmd)
 {
-	struct sbc_ops *ops = cmd->protocol_data;
+	struct exec_cmd_ops *ops = cmd->protocol_data;
 	sector_t nolb = sbc_get_write_same_sectors(cmd);
 	sense_reason_t ret;
 
@@ -271,7 +271,8 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb)
 }
 
 static sense_reason_t
-sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
+sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags,
+		     struct exec_cmd_ops *ops)
 {
 	struct se_device *dev = cmd->se_dev;
 	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
@@ -340,7 +341,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op
 static sense_reason_t
 sbc_execute_rw(struct se_cmd *cmd)
 {
-	struct sbc_ops *ops = cmd->protocol_data;
+	struct exec_cmd_ops *ops = cmd->protocol_data;
 
 	return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
 			       cmd->data_direction);
@@ -566,7 +567,7 @@ out:
 static sense_reason_t
 sbc_compare_and_write(struct se_cmd *cmd)
 {
-	struct sbc_ops *ops = cmd->protocol_data;
+	struct exec_cmd_ops *ops = cmd->protocol_data;
 	struct se_device *dev = cmd->se_dev;
 	sense_reason_t ret;
 	int rc;
@@ -764,7 +765,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
 }
 
 sense_reason_t
-sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops)
 {
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *cdb = cmd->t_task_cdb;
@@ -1076,7 +1077,7 @@ EXPORT_SYMBOL(sbc_get_device_type);
 static sense_reason_t
 sbc_execute_unmap(struct se_cmd *cmd)
 {
-	struct sbc_ops *ops = cmd->protocol_data;
+	struct exec_cmd_ops *ops = cmd->protocol_data;
 	struct se_device *dev = cmd->se_dev;
 	unsigned char *buf, *ptr = NULL;
 	sector_t lba;
@ -1424,9 +1424,10 @@ static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
|
||||
.update_usage_bits = set_dpofua_usage_bits,
|
||||
};
|
||||
|
||||
static bool tcm_is_ws_enabled(struct se_cmd *cmd)
|
||||
static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr,
|
||||
struct se_cmd *cmd)
|
||||
{
|
||||
struct sbc_ops *ops = cmd->protocol_data;
|
||||
struct exec_cmd_ops *ops = cmd->protocol_data;
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
|
||||
return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
|
||||
@ -1451,7 +1452,8 @@ static struct target_opcode_descriptor tcm_opcode_write_same32 = {
|
||||
.update_usage_bits = set_dpofua_usage_bits32,
|
||||
};
|
||||
|
||||
static bool tcm_is_caw_enabled(struct se_cmd *cmd)
|
||||
static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr,
|
||||
struct se_cmd *cmd)
|
||||
{
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
|
||||
@ -1491,7 +1493,8 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
|
||||
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
|
||||
};
|
||||
|
||||
static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd)
|
||||
static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr,
|
||||
struct se_cmd *cmd)
|
||||
{
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
|
||||
@ -1502,7 +1505,6 @@ static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd)
|
||||
}
|
||||
spin_unlock(&dev->t10_alua.lba_map_lock);
|
||||
return true;
|
||||
|
||||
}
|
||||
|
||||
static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
|
||||
@ -1537,9 +1539,10 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
|
||||
0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
|
||||
};
|
||||
|
||||
static bool tcm_is_unmap_enabled(struct se_cmd *cmd)
|
||||
static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr,
|
||||
struct se_cmd *cmd)
|
||||
{
|
||||
struct sbc_ops *ops = cmd->protocol_data;
|
||||
struct exec_cmd_ops *ops = cmd->protocol_data;
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
|
||||
return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
|
||||
@ -1659,11 +1662,46 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
|
||||
0xff, SCSI_CONTROL_MASK},
|
||||
};
|
||||
|
||||
static bool tcm_is_pr_enabled(struct se_cmd *cmd)
|
||||
static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr,
|
||||
struct se_cmd *cmd)
|
||||
{
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
|
||||
return dev->dev_attrib.emulate_pr;
|
||||
if (!dev->dev_attrib.emulate_pr)
|
||||
return false;
|
||||
|
||||
if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
|
||||
return true;
|
||||
|
||||
switch (descr->opcode) {
|
||||
case RESERVE:
|
||||
case RESERVE_10:
|
||||
case RELEASE:
|
||||
case RELEASE_10:
|
||||
/*
|
||||
* The pr_ops which are used by the backend modules don't
|
||||
* support these commands.
|
||||
*/
|
||||
return false;
|
||||
case PERSISTENT_RESERVE_OUT:
|
||||
switch (descr->service_action) {
|
||||
case PRO_REGISTER_AND_MOVE:
|
||||
case PRO_REPLACE_LOST_RESERVATION:
|
||||
/*
|
||||
* The backend modules don't have access to ports and
|
||||
* I_T nexuses so they can't handle these type of
|
||||
* requests.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case PERSISTENT_RESERVE_IN:
|
||||
if (descr->service_action == PRI_READ_FULL_STATUS)
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
|
||||
@ -1788,20 +1826,13 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
|
||||
.enabled = tcm_is_pr_enabled,
|
||||
};
|
||||
|
||||
static bool tcm_is_scsi2_reservations_enabled(struct se_cmd *cmd)
|
||||
{
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
|
||||
return dev->dev_attrib.emulate_pr;
|
||||
}
|
||||
|
||||
static struct target_opcode_descriptor tcm_opcode_release = {
|
||||
.support = SCSI_SUPPORT_FULL,
|
||||
.opcode = RELEASE,
|
||||
.cdb_size = 6,
|
||||
.usage_bits = {RELEASE, 0x00, 0x00, 0x00,
|
||||
0x00, SCSI_CONTROL_MASK},
|
||||
.enabled = tcm_is_scsi2_reservations_enabled,
|
||||
.enabled = tcm_is_pr_enabled,
|
||||
};
|
||||
|
||||
static struct target_opcode_descriptor tcm_opcode_release10 = {
|
||||
@ -1811,7 +1842,7 @@ static struct target_opcode_descriptor tcm_opcode_release10 = {
|
||||
.usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0xff,
|
||||
0xff, SCSI_CONTROL_MASK},
|
||||
.enabled = tcm_is_scsi2_reservations_enabled,
|
||||
.enabled = tcm_is_pr_enabled,
|
||||
};
|
||||
|
||||
static struct target_opcode_descriptor tcm_opcode_reserve = {
|
||||
@ -1820,7 +1851,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve = {
|
||||
.cdb_size = 6,
|
||||
.usage_bits = {RESERVE, 0x00, 0x00, 0x00,
|
||||
0x00, SCSI_CONTROL_MASK},
|
||||
.enabled = tcm_is_scsi2_reservations_enabled,
|
||||
.enabled = tcm_is_pr_enabled,
|
||||
};
|
||||
|
||||
static struct target_opcode_descriptor tcm_opcode_reserve10 = {
|
||||
@ -1830,7 +1861,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve10 = {
|
||||
.usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0xff,
|
||||
0xff, SCSI_CONTROL_MASK},
|
||||
.enabled = tcm_is_scsi2_reservations_enabled,
|
||||
.enabled = tcm_is_pr_enabled,
|
||||
};
|
||||
|
||||
static struct target_opcode_descriptor tcm_opcode_request_sense = {
|
||||
@ -1849,7 +1880,8 @@ static struct target_opcode_descriptor tcm_opcode_inquiry = {
|
||||
0xff, SCSI_CONTROL_MASK},
|
||||
};
|
||||
|
||||
static bool tcm_is_3pc_enabled(struct se_cmd *cmd)
|
||||
static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr,
|
||||
struct se_cmd *cmd)
|
||||
{
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
|
||||
@ -1910,8 +1942,8 @@ static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
|
||||
0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
|
||||
};
|
||||
|
||||
|
||||
static bool spc_rsoc_enabled(struct se_cmd *cmd)
|
||||
static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr,
|
||||
struct se_cmd *cmd)
|
||||
{
|
||||
struct se_device *dev = cmd->se_dev;
|
||||
|
||||
@ -1931,7 +1963,8 @@ static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
|
||||
.enabled = spc_rsoc_enabled,
|
||||
};
|
||||
|
||||
static bool tcm_is_set_tpg_enabled(struct se_cmd *cmd)
|
||||
static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr,
|
||||
struct se_cmd *cmd)
|
||||
{
|
||||
struct t10_alua_tg_pt_gp *l_tg_pt_gp;
|
||||
struct se_lun *l_lun = cmd->se_lun;
|
||||

@ -2118,7 +2151,7 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
		if (descr->serv_action_valid)
			return TCM_INVALID_CDB_FIELD;

		if (!descr->enabled || descr->enabled(cmd))
		if (!descr->enabled || descr->enabled(descr, cmd))
			*opcode = descr;
		break;
	case 0x2:

@ -2132,7 +2165,8 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
		 */
		if (descr->serv_action_valid &&
		    descr->service_action == requested_sa) {
			if (!descr->enabled || descr->enabled(cmd))
			if (!descr->enabled || descr->enabled(descr,
							      cmd))
				*opcode = descr;
		} else if (!descr->serv_action_valid)
			return TCM_INVALID_CDB_FIELD;

@ -2145,7 +2179,8 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
		 * be returned in the one_command parameter data format.
		 */
		if (descr->service_action == requested_sa)
			if (!descr->enabled || descr->enabled(cmd))
			if (!descr->enabled || descr->enabled(descr,
							      cmd))
				*opcode = descr;
		break;
	}

@ -2202,7 +2237,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd)

	for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
		descr = tcm_supported_opcodes[i];
		if (descr->enabled && !descr->enabled(cmd))
		if (descr->enabled && !descr->enabled(descr, cmd))
			continue;

		response_length += spc_rsoc_encode_command_descriptor(

@ -2231,12 +2266,22 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	switch (cdb[0]) {
	case RESERVE:
	case RESERVE_10:
	case RELEASE:
	case RELEASE_10:
		if (!dev->dev_attrib.emulate_pr)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
		if (!dev->dev_attrib.emulate_pr)
			return TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	switch (cdb[0]) {

@ -101,7 +101,7 @@ typedef u16 blk_short_t;
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_RESV_CONFLICT	((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)

@ -189,7 +189,7 @@ static inline bool blk_path_error(blk_status_t error)
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_RESV_CONFLICT:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
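
The rename keeps the numeric value 6, so nothing changes at the ABI level; the name now simply says what this status has always been used for in practice. A driver-side completion handler that wants to react to a conflict can test for it directly; a minimal sketch (my_end_io is a hypothetical bio end_io callback, not part of this series):

static void my_end_io(struct bio *bio)
{
	if (bio->bi_status == BLK_STS_RESV_CONFLICT)
		pr_warn("persistent reservation conflict on %pg\n",
			bio->bi_bdev);
	bio_put(bio);
}
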

@ -759,20 +759,55 @@ enum {
	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
};

enum nvme_pr_type {
	NVME_PR_WRITE_EXCLUSIVE			= 1,
	NVME_PR_EXCLUSIVE_ACCESS		= 2,
	NVME_PR_WRITE_EXCLUSIVE_REG_ONLY	= 3,
	NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY	= 4,
	NVME_PR_WRITE_EXCLUSIVE_ALL_REGS	= 5,
	NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS	= 6,
};

enum nvme_eds {
	NVME_EXTENDED_DATA_STRUCT	= 0x1,
};

struct nvme_registered_ctrl {
	__le16	cntlid;
	__u8	rcsts;
	__u8	rsvd3[5];
	__le64	hostid;
	__le64	rkey;
};

struct nvme_reservation_status {
	__le32	gen;
	__u8	rtype;
	__u8	regctl[2];
	__u8	resv5[2];
	__u8	ptpls;
	__u8	resv10[13];
	struct {
		__le16	cntlid;
		__u8	rcsts;
		__u8	resv3[5];
		__le64	hostid;
		__le64	rkey;
	} regctl_ds[];
	__u8	resv10[14];
	struct nvme_registered_ctrl regctl_ds[];
};

struct nvme_registered_ctrl_ext {
	__le16	cntlid;
	__u8	rcsts;
	__u8	rsvd3[5];
	__le64	rkey;
	__u8	hostid[16];
	__u8	rsvd32[32];
};

struct nvme_reservation_status_ext {
	__le32	gen;
	__u8	rtype;
	__u8	regctl[2];
	__u8	resv5[2];
	__u8	ptpls;
	__u8	resv10[14];
	__u8	rsvd24[40];
	struct nvme_registered_ctrl_ext regctl_eds[];
};

enum nvme_async_event_type {
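
Widening resv10 from 13 to 14 reserved bytes puts regctl_ds[] at byte offset 24 (4 + 1 + 2 + 2 + 1 + 14), which is where the Reservation Status data structure places the first registered-controller entry, and naming the registrant layout lets callers index it directly. A hedged sketch of walking the status page for a given controller ID (the helper and its fallback return value are illustrative):

static u64 find_rkey_for_cntlid(struct nvme_reservation_status *status,
				u16 cntlid)
{
	/* regctl is a 2-byte little-endian count at an odd offset. */
	u16 regctl = get_unaligned_le16(status->regctl);
	int i;

	for (i = 0; i < regctl; i++) {
		struct nvme_registered_ctrl *ctrl = &status->regctl_ds[i];

		if (le16_to_cpu(ctrl->cntlid) == cntlid)
			return le64_to_cpu(ctrl->rkey);
	}
	return 0;	/* no registration found */
}
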

@ -4,6 +4,18 @@

#include <uapi/linux/pr.h>

struct pr_keys {
	u32	generation;
	u32	num_keys;
	u64	keys[];
};

struct pr_held_reservation {
	u64		key;
	u32		generation;
	enum pr_type	type;
};

struct pr_ops {
	int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key,
			u32 flags);

@ -14,6 +26,19 @@ struct pr_ops {
	int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key,
			enum pr_type type, bool abort);
	int (*pr_clear)(struct block_device *bdev, u64 key);
	/*
	 * pr_read_keys - Read the registered keys and return them in the
	 * pr_keys->keys array. The keys array will have been allocated at the
	 * end of the pr_keys struct, and pr_keys->num_keys must be set to the
	 * number of keys the array can hold. If there are more than can fit
	 * in the array, success will still be returned and pr_keys->num_keys
	 * will reflect the total number of keys the device contains, so the
	 * caller can retry with a larger array.
	 */
	int (*pr_read_keys)(struct block_device *bdev,
			struct pr_keys *keys_info);
	int (*pr_read_reservation)(struct block_device *bdev,
			struct pr_held_reservation *rsv);
};

#endif /* LINUX_PR_H */
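
The comment on pr_read_keys implies a simple retry protocol on the caller's side: size the keys[] array with a guess, issue the read, and if the device reports more registrations than the array could hold, grow the allocation and try again. A minimal sketch of such a caller (read_all_pr_keys and the starting guess are illustrative, not part of this series):

static struct pr_keys *read_all_pr_keys(struct block_device *bdev)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	u32 num_keys = 16;	/* arbitrary starting guess */
	struct pr_keys *keys;

	if (!ops || !ops->pr_read_keys)
		return ERR_PTR(-EOPNOTSUPP);

retry:
	keys = kzalloc(struct_size(keys, keys, num_keys), GFP_KERNEL);
	if (!keys)
		return ERR_PTR(-ENOMEM);
	keys->num_keys = num_keys;

	if (ops->pr_read_keys(bdev, keys)) {
		kfree(keys);
		return ERR_PTR(-EIO);
	}

	if (keys->num_keys > num_keys) {
		/* Device has more keys than we allocated room for. */
		num_keys = keys->num_keys;
		kfree(keys);
		goto retry;
	}
	return keys;
}
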

@ -7,8 +7,21 @@
#define _SCSI_COMMON_H_

#include <linux/types.h>
#include <uapi/linux/pr.h>
#include <scsi/scsi_proto.h>

enum scsi_pr_type {
	SCSI_PR_WRITE_EXCLUSIVE			= 0x01,
	SCSI_PR_EXCLUSIVE_ACCESS		= 0x03,
	SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY	= 0x05,
	SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY	= 0x06,
	SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS	= 0x07,
	SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS	= 0x08,
};

enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type);
enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type);

static inline unsigned
scsi_varlen_cdb_length(const void *hdr)
{
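
The SCSI persistent reservation type codes are not a simple offset of the block-layer enum pr_type values (0x02 and 0x04 are skipped), so the two conversion helpers declared above have to be explicit switches rather than arithmetic. A plausible sketch of one direction (the fallback value is a guess, not confirmed by this diff):

enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return SCSI_PR_WRITE_EXCLUSIVE;
	case PR_EXCLUSIVE_ACCESS:
		return SCSI_PR_EXCLUSIVE_ACCESS;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS;
	}
	/* Unreachable for valid input; 0 is not a defined SCSI PR type. */
	return 0;
}
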

@ -151,6 +151,11 @@
#define ZO_FINISH_ZONE		0x02
#define ZO_OPEN_ZONE		0x03
#define ZO_RESET_WRITE_POINTER	0x04
/* values for PR in service action */
#define READ_KEYS		0x00
#define READ_RESERVATION	0x01
#define REPORT_CAPABILITES	0x02
#define READ_FULL_STATUS	0x03
/* values for variable length command */
#define XDREAD_32		0x03
#define XDWRITE_32		0x04
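
These service-action codes occupy the low five bits of byte 1 of a PERSISTENT RESERVE IN CDB (a 10-byte CDB with the allocation length in bytes 7-8). A minimal sketch of building such a CDB with the new constants (build_pr_in_cdb is an illustrative helper, not part of this series):

static void build_pr_in_cdb(u8 *cdb, u8 sa, u16 alloc_len)
{
	memset(cdb, 0, 10);
	cdb[0] = PERSISTENT_RESERVE_IN;
	cdb[1] = sa & 0x1f;	/* e.g. READ_KEYS or READ_RESERVATION */
	put_unaligned_be16(alloc_len, &cdb[7]);
}
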

@ -62,13 +62,17 @@ struct target_backend_ops {
	struct configfs_attribute **tb_dev_action_attrs;
};

struct sbc_ops {
struct exec_cmd_ops {
	sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *,
				     u32, enum dma_data_direction);
	sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd);
	sense_reason_t (*execute_write_same)(struct se_cmd *cmd);
	sense_reason_t (*execute_unmap)(struct se_cmd *cmd,
					sector_t lba, sector_t nolb);
	sense_reason_t (*execute_pr_out)(struct se_cmd *cmd, u8 sa, u64 key,
					 u64 sa_key, u8 type, bool aptpl);
	sense_reason_t (*execute_pr_in)(struct se_cmd *cmd, u8 sa,
					unsigned char *param_data);
};

int transport_backend_register(const struct target_backend_ops *);

@ -86,7 +90,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *);
sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *);

sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops);
sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops);
u32 sbc_get_device_rev(struct se_device *dev);
u32 sbc_get_device_type(struct se_device *dev);
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd);
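
Renaming sbc_ops to exec_cmd_ops reflects that the table is no longer SBC-only: with execute_pr_out/execute_pr_in, a backend sitting on a real block device can route PERSISTENT RESERVE commands to that device's pr_ops instead of LIO's in-core emulation. A rough sketch of the shape an iblock-style execute_pr_out handler takes (only three service actions shown, and the error mapping is illustrative; the series itself also covers preempt, clear and APTPL handling):

static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key,
					    u64 sa_key, u8 type, bool aptpl)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct block_device *bdev = ib_dev->ibd_bd;
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	int ret;

	if (!ops)
		return TCM_UNSUPPORTED_SCSI_OPCODE;

	switch (sa) {
	case PRO_REGISTER:
		ret = ops->pr_register(bdev, key, sa_key, 0);
		break;
	case PRO_RESERVE:
		ret = ops->pr_reserve(bdev, key,
				      scsi_pr_type_to_block(type), 0);
		break;
	case PRO_RELEASE:
		ret = ops->pr_release(bdev, key,
				      scsi_pr_type_to_block(type));
		break;
	default:
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return ret ? TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE : TCM_NO_SENSE;
}
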

@ -880,7 +880,8 @@ struct target_opcode_descriptor {
	u8 specific_timeout;
	u16 nominal_timeout;
	u16 recommended_timeout;
	bool (*enabled)(struct se_cmd *cmd);
	bool (*enabled)(struct target_opcode_descriptor *descr,
			struct se_cmd *cmd);
	void (*update_usage_bits)(u8 *usage_bits,
				  struct se_device *dev);
	u8 usage_bits[];