target/file: Update hw_max_sectors based on current block_size

This patch allows FILEIO to update hw_max_sectors based on the current
max_bytes_per_io.  This is required because vfs_[writev,readv]() can accept
a maximum of 2048 iovecs per call, so the enforced hw_max_sectors really
needs to be calculated based on block_size.

This addresses a >= v3.5 bug where block_size=512 was rejecting > 1M
sized I/O requests, because FD_MAX_SECTORS was hardcoded to 2048 for
the block_size=4096 case.

(v2: Use max_bytes_per_io instead of ->update_hw_max_sectors)

Reported-by: Henrik Goldman <hg@x-formation.com>
Cc: <stable@vger.kernel.org> #3.5+
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
This commit is contained in:
Nicholas Bellinger 2013-12-12 12:24:11 -08:00
parent 2853c2b667
commit 95cadace8f
4 changed files with 14 additions and 5 deletions

View File: drivers/target/target_core_device.c

@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
 	dev->dev_attrib.block_size = block_size;
 	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
 			dev, block_size);
+
+	if (dev->dev_attrib.max_bytes_per_io)
+		dev->dev_attrib.hw_max_sectors =
+			dev->dev_attrib.max_bytes_per_io / block_size;
+
 	return 0;
 }

View File: drivers/target/target_core_file.c

@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
 	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
 		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
 		TARGET_CORE_MOD_VERSION);
-	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
-		" MaxSectors: %u\n",
-		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+		hba->hba_id, fd_host->fd_host_id);

 	return 0;
 }
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
 	}

 	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
-	dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
 	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

 	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {

View File: drivers/target/target_core_file.h

@@ -7,7 +7,10 @@
 #define FD_DEVICE_QUEUE_DEPTH	32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE		512
-#define FD_MAX_SECTORS		2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES		8388608

 #define RRF_EMULATE_CDB	0x01
 #define RRF_GOT_LBA		0x02

View File: include/target/target_core_base.h

@@ -620,6 +620,7 @@ struct se_dev_attrib {
 	u32 unmap_granularity;
 	u32 unmap_granularity_alignment;
 	u32 max_write_same_len;
+	u32 max_bytes_per_io;
 	struct se_device *da_dev;
 	struct config_group da_group;
 };