# SPDX-License-Identifier: GPL-2.0-only
# Core NVMe support, shared by all transport drivers below. Not user-visible
# (no prompt string): it is pulled in via "select NVME_CORE" by the
# transport-specific options.
config NVME_CORE
	tristate
	select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
# PCI/PCIe NVMe block device driver (the common case: local NVMe SSDs).
config BLK_DEV_NVME
	tristate "NVM Express block device"
	depends on PCI && BLOCK
	select NVME_CORE
	help
	  The NVM Express driver is for solid state drives directly
	  connected to the PCI or PCI Express bus.  If you know you
	  don't have one of these, it is safe to answer N.

	  To compile this driver as a module, choose M here: the
	  module will be called nvme.
# Native NVMe multipathing: collapse multiple controller paths to a
# namespace into a single /dev/nvmeXnY node.
config NVME_MULTIPATH
	bool "NVMe multipath support"
	depends on NVME_CORE
	help
	  This option enables support for multipath access to NVMe
	  subsystems. If this option is enabled only a single
	  /dev/nvmeXnY device will show up for each NVMe namespaces,
	  even if it is accessible through multiple controllers.
# Expose NVMe SMART temperature data through the hwmon subsystem.
# The dependency expression ensures HWMON is reachable from wherever
# NVME_CORE lives (built-in core needs built-in HWMON).
config NVME_HWMON
	bool "NVMe hardware monitoring"
	depends on (NVME_CORE=y && HWMON=y) || (NVME_CORE=m && HWMON)
	help
	  This provides support for NVMe hardware monitoring. If enabled,
	  a hardware monitoring device will be created for each NVMe drive
	  in the system.
# Common NVMe-over-Fabrics support library. Not user-visible; selected
# by the RDMA, FC and TCP transport drivers.
config NVME_FABRICS
	tristate
# NVMe over Fabrics host driver for RDMA transports.
config NVME_RDMA
	tristate "NVM Express over Fabrics RDMA host driver"
	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
	select NVME_CORE
	select NVME_FABRICS
	select SG_POOL
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the RDMA (Infiniband, RoCE, iWarp) transport.  This allows you
	  to use remote block devices exported using the NVMe protocol set.

	  To configure a NVMe over Fabrics controller use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.
# NVMe over Fabrics host driver for the Fibre Channel transport.
config NVME_FC
	tristate "NVM Express over Fabrics FC host driver"
	depends on BLOCK
	depends on HAS_DMA
	select NVME_CORE
	select NVME_FABRICS
	select SG_POOL
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the FC transport.  This allows you to use remote block devices
	  exported using the NVMe protocol set.

	  To configure a NVMe over Fabrics controller use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.
# NVMe over Fabrics host driver for the TCP transport.
# CRYPTO_CRC32C is needed for the NVMe/TCP PDU digest.
config NVME_TCP
	tristate "NVM Express over Fabrics TCP host driver"
	depends on INET
	depends on BLK_DEV_NVME
	select NVME_FABRICS
	select CRYPTO_CRC32C
	help
	  This provides support for the NVMe over Fabrics protocol using
	  the TCP transport.  This allows you to use remote block devices
	  exported using the NVMe protocol set.

	  To configure a NVMe over Fabrics controller use the nvme-cli tool
	  from https://github.com/linux-nvme/nvme-cli.

	  If unsure, say N.