forked from Minki/linux
um: ubd: Add REQ_FLUSH support
UML's block device driver does not support write barriers, so to support them this patch adds REQ_FLUSH support. Every time the block layer sends a REQ_FLUSH, we now fsync() our backing file to guarantee data consistency. Reported-and-tested-by: Richard W.M. Jones <rjones@redhat.com> Signed-off-by: Richard Weinberger <richard@nod.at>
This commit is contained in:
parent
f75b1b1bed
commit
805f11a0d5
@ -41,7 +41,7 @@
|
||||
#include <os.h>
|
||||
#include "cow.h"
|
||||
|
||||
enum ubd_req { UBD_READ, UBD_WRITE };
|
||||
enum ubd_req { UBD_READ, UBD_WRITE, UBD_FLUSH };
|
||||
|
||||
struct io_thread_req {
|
||||
struct request *req;
|
||||
@ -866,6 +866,7 @@ static int ubd_add(int n, char **error_out)
|
||||
goto out;
|
||||
}
|
||||
ubd_dev->queue->queuedata = ubd_dev;
|
||||
blk_queue_flush(ubd_dev->queue, REQ_FLUSH);
|
||||
|
||||
blk_queue_max_segments(ubd_dev->queue, MAX_SG);
|
||||
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
|
||||
@ -1238,6 +1239,19 @@ static void prepare_request(struct request *req, struct io_thread_req *io_req,
|
||||
|
||||
}
|
||||
|
||||
/* Called with dev->lock held */
|
||||
static void prepare_flush_request(struct request *req,
|
||||
struct io_thread_req *io_req)
|
||||
{
|
||||
struct gendisk *disk = req->rq_disk;
|
||||
struct ubd *ubd_dev = disk->private_data;
|
||||
|
||||
io_req->req = req;
|
||||
io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
|
||||
ubd_dev->fd;
|
||||
io_req->op = UBD_FLUSH;
|
||||
}
|
||||
|
||||
/* Called with dev->lock held */
|
||||
static void do_ubd_request(struct request_queue *q)
|
||||
{
|
||||
@ -1259,6 +1273,20 @@ static void do_ubd_request(struct request_queue *q)
|
||||
}
|
||||
|
||||
req = dev->request;
|
||||
|
||||
if (req->cmd_flags & REQ_FLUSH) {
|
||||
io_req = kmalloc(sizeof(struct io_thread_req),
|
||||
GFP_ATOMIC);
|
||||
if (io_req == NULL) {
|
||||
if (list_empty(&dev->restart))
|
||||
list_add(&dev->restart, &restart);
|
||||
return;
|
||||
}
|
||||
prepare_flush_request(req, io_req);
|
||||
os_write_file(thread_fd, &io_req,
|
||||
sizeof(struct io_thread_req *));
|
||||
}
|
||||
|
||||
while(dev->start_sg < dev->end_sg){
|
||||
struct scatterlist *sg = &dev->sg[dev->start_sg];
|
||||
|
||||
@ -1367,6 +1395,17 @@ static void do_io(struct io_thread_req *req)
|
||||
int err;
|
||||
__u64 off;
|
||||
|
||||
if (req->op == UBD_FLUSH) {
|
||||
/* fds[0] is always either the rw image or our cow file */
|
||||
n = os_sync_file(req->fds[0]);
|
||||
if (n != 0) {
|
||||
printk("do_io - sync failed err = %d "
|
||||
"fd = %d\n", -n, req->fds[0]);
|
||||
req->error = 1;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
nsectors = req->length / req->sectorsize;
|
||||
start = 0;
|
||||
do {
|
||||
|
@ -141,6 +141,7 @@ extern int os_seek_file(int fd, unsigned long long offset);
|
||||
extern int os_open_file(const char *file, struct openflags flags, int mode);
|
||||
extern int os_read_file(int fd, void *buf, int len);
|
||||
extern int os_write_file(int fd, const void *buf, int count);
|
||||
extern int os_sync_file(int fd);
|
||||
extern int os_file_size(const char *file, unsigned long long *size_out);
|
||||
extern int os_file_modtime(const char *file, unsigned long *modtime);
|
||||
extern int os_pipe(int *fd, int stream, int close_on_exec);
|
||||
|
@ -266,6 +266,15 @@ int os_write_file(int fd, const void *buf, int len)
|
||||
return n;
|
||||
}
|
||||
|
||||
int os_sync_file(int fd)
|
||||
{
|
||||
int n = fsync(fd);
|
||||
|
||||
if (n < 0)
|
||||
return -errno;
|
||||
return n;
|
||||
}
|
||||
|
||||
int os_file_size(const char *file, unsigned long long *size_out)
|
||||
{
|
||||
struct uml_stat buf;
|
||||
|
Loading…
Reference in New Issue
Block a user