Mirror of https://github.com/torvalds/linux.git, synced 2024-11-30 16:11:38 +00:00
commit 769031c92c
There are two advantages:

* Direct I/O avoids the write-back cache, so it reduces the impact on other
  processes in the system.
* Async I/O allows handling several commands concurrently.

DIO + AIO shows better performance for random write operations:

Mode: O_DSYNC Async: 1
$ ./fio --bs=4K --direct=1 --rw=randwrite --ioengine=libaio --iodepth=64 --name=/dev/sda --runtime=20 --numjobs=2
  WRITE: bw=45.9MiB/s (48.1MB/s), 21.9MiB/s-23.0MiB/s (22.0MB/s-25.2MB/s), io=919MiB (963MB), run=20002-20020msec

Mode: O_DSYNC Async: 0
$ ./fio --bs=4K --direct=1 --rw=randwrite --ioengine=libaio --iodepth=64 --name=/dev/sdb --runtime=20 --numjobs=2
  WRITE: bw=1607KiB/s (1645kB/s), 802KiB/s-805KiB/s (821kB/s-824kB/s), io=31.8MiB (33.4MB), run=20280-20295msec

Known issue: DIF (PI) emulation doesn't work when a target uses async I/O,
because DIF metadata is saved in a separate file, and it is another
non-trivial task to synchronize writes to the two files so that a following
read operation always returns consistent metadata for a specified block.

Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
Tested-by: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
Signed-off-by: Andrei Vagin <avagin@openvz.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
Reviewed-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
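As an illustration only (not part of this patch), here is a minimal userspace C
sketch of the same DIO + AIO combination the fio runs above exercise: a file is
opened with O_DIRECT | O_DSYNC and a block-aligned write is submitted through
the Linux native AIO interface (libaio). The file name "testfile", block size,
and queue depth are arbitrary choices for the sketch; build it with -laio.

#define _GNU_SOURCE             /* for O_DIRECT */
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define BLK   4096              /* O_DIRECT needs block-aligned buffers and sizes */
#define DEPTH 64                /* matches --iodepth=64 in the fio command line */

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd, ret;

	fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT | O_DSYNC, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = posix_memalign(&buf, BLK, BLK);   /* aligned buffer for O_DIRECT */
	if (ret) {
		fprintf(stderr, "posix_memalign: %d\n", ret);
		return 1;
	}
	memset(buf, 0xab, BLK);

	ret = io_setup(DEPTH, &ctx);            /* create the AIO context */
	if (ret < 0) {
		fprintf(stderr, "io_setup: %d\n", ret);
		return 1;
	}

	/* queue one 4K write at offset 0, then wait for its completion */
	io_prep_pwrite(&cb, fd, buf, BLK, 0);
	ret = io_submit(ctx, 1, cbs);
	if (ret != 1) {
		fprintf(stderr, "io_submit: %d\n", ret);
		return 1;
	}
	ret = io_getevents(ctx, 1, 1, &ev, NULL);
	if (ret == 1)
		printf("write completed, res=%ld\n", (long)ev.res);

	io_destroy(ctx);
	free(buf);
	close(fd);
	return 0;
}

With a queue depth greater than one, more iocbs can be submitted before any
completion is reaped, which is how fio keeps 64 commands in flight per job in
the numbers quoted above.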
53 lines · 1.2 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H

#include <target/target_core_base.h>

#define FD_VERSION		"4.0"

#define FD_MAX_DEV_NAME		256
#define FD_MAX_DEV_PROT_NAME	FD_MAX_DEV_NAME + 16
#define FD_DEVICE_QUEUE_DEPTH	32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE		512
/*
 * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
 */
#define FD_MAX_BYTES		8388608

#define RRF_EMULATE_CDB		0x01
#define RRF_GOT_LBA		0x02

#define FBDF_HAS_PATH		0x01
#define FBDF_HAS_SIZE		0x02
#define FDBD_HAS_BUFFERED_IO_WCE 0x04
#define FDBD_HAS_ASYNC_IO	0x08
#define FDBD_FORMAT_UNIT_SIZE	2048

struct fd_dev {
	struct se_device dev;

	u32		fbd_flags;
	unsigned char	fd_dev_name[FD_MAX_DEV_NAME];
	/* Unique Ramdisk Device ID in Ramdisk HBA */
	u32		fd_dev_id;
	/* Number of SG tables in sg_table_array */
	u32		fd_table_count;
	u32		fd_queue_depth;
	u32		fd_block_size;
	unsigned long long fd_dev_size;
	struct file	*fd_file;
	struct file	*fd_prot_file;
	/* FILEIO HBA device is connected to */
	struct fd_host	*fd_host;
} ____cacheline_aligned;

struct fd_host {
	u32		fd_host_dev_id_count;
	/* Unique FILEIO Host ID */
	u32		fd_host_id;
} ____cacheline_aligned;

#endif /* TARGET_CORE_FILE_H */
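The FDBD_HAS_ASYNC_IO flag and the fd_file pointer above are the pieces the
async path builds on. Below is a simplified, hypothetical sketch (not the
actual drivers/target/target_core_file.c code) of how a backend can submit an
asynchronous direct write against fd_dev->fd_file: a kiocb embedded in a
per-command structure, flagged IOCB_DIRECT, with completion driven by the
->ki_complete callback. The names fd_async_cmd, fd_async_complete and
fd_submit_async_write are invented for this sketch, and the ki_complete
prototype (three arguments here) varies between kernel versions.

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include "target_core_file.h"

/* per-command context: the kiocb must stay alive until ->ki_complete runs */
struct fd_async_cmd {
	struct se_cmd	*se_cmd;
	struct kiocb	iocb;
};

static void fd_async_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct fd_async_cmd *acmd = container_of(iocb, struct fd_async_cmd, iocb);

	/* report SCSI status back to the target core, then free the context */
	target_complete_cmd(acmd->se_cmd, ret >= 0 ? SAM_STAT_GOOD :
			    SAM_STAT_CHECK_CONDITION);
	kfree(acmd);
}

static int fd_submit_async_write(struct fd_dev *fd_dev, struct se_cmd *se_cmd,
				 struct iov_iter *iter, loff_t pos)
{
	struct fd_async_cmd *acmd;
	int ret;

	acmd = kzalloc(sizeof(*acmd), GFP_KERNEL);
	if (!acmd)
		return -ENOMEM;
	acmd->se_cmd = se_cmd;

	acmd->iocb.ki_filp = fd_dev->fd_file;
	acmd->iocb.ki_pos = pos;
	acmd->iocb.ki_flags = IOCB_DIRECT;	/* bypass the page cache */
	acmd->iocb.ki_complete = fd_async_complete;

	ret = call_write_iter(fd_dev->fd_file, &acmd->iocb, iter);
	if (ret != -EIOCBQUEUED)		/* completed (or failed) synchronously */
		fd_async_complete(&acmd->iocb, ret, 0);
	return 0;
}

If the filesystem queues the I/O, call_write_iter() returns -EIOCBQUEUED and
the completion callback runs later from interrupt or workqueue context;
otherwise the sketch completes the command inline, which mirrors the general
pattern of in-kernel async submission.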