Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
  fuse: clean up annotations of fc->lock
  fuse: fix sparse warning in ioctl
  fuse: update interface version
  fuse: add fuse_conn->release()
  fuse: separate out fuse_conn_init() from new_conn()
  fuse: add fuse_ prefix to several functions
  fuse: implement poll support
  fuse: implement unsolicited notification
  fuse: add file kernel handle
  fuse: implement ioctl support
  fuse: don't let fuse_req->end() put the base reference
  fuse: move FUSE_MINOR to miscdevice.h
  fuse: style fixes
Linus Torvalds 2009-01-06 17:01:20 -08:00
commit 5fec8bdbf9
8 changed files with 783 additions and 202 deletions

fs/fuse/control.c

@ -1,6 +1,6 @@
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
@ -48,11 +48,13 @@ static ssize_t fuse_conn_waiting_read(struct file *file, char __user *buf,
size_t size;
if (!*ppos) {
long value;
struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
if (!fc)
return 0;
file->private_data=(void *)(long)atomic_read(&fc->num_waiting);
value = atomic_read(&fc->num_waiting);
file->private_data = (void *)value;
fuse_conn_put(fc);
}
size = sprintf(tmp, "%ld\n", (long)file->private_data);

fs/fuse/dev.c

@ -1,6 +1,6 @@
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
@ -269,7 +269,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
* Called with fc->lock, unlocks it
*/
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__releases(&fc->lock)
{
void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
req->end = NULL;
@ -293,13 +293,13 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&req->waitq);
if (end)
end(fc, req);
else
fuse_put_request(fc, req);
fuse_put_request(fc, req);
}
static void wait_answer_interruptible(struct fuse_conn *fc,
struct fuse_req *req)
__releases(fc->lock) __acquires(fc->lock)
__releases(&fc->lock)
__acquires(&fc->lock)
{
if (signal_pending(current))
return;
@ -317,7 +317,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock) __acquires(fc->lock)
__releases(&fc->lock)
__acquires(&fc->lock)
{
if (!fc->no_interrupt) {
/* Any signal may interrupt this */
@ -380,7 +381,7 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
}
}
void request_send(struct fuse_conn *fc, struct fuse_req *req)
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
req->isreply = 1;
spin_lock(&fc->lock);
@ -399,8 +400,8 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
spin_unlock(&fc->lock);
}
static void request_send_nowait_locked(struct fuse_conn *fc,
struct fuse_req *req)
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
struct fuse_req *req)
{
req->background = 1;
fc->num_background++;
@ -414,11 +415,11 @@ static void request_send_nowait_locked(struct fuse_conn *fc,
flush_bg_queue(fc);
}
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
spin_lock(&fc->lock);
if (fc->connected) {
request_send_nowait_locked(fc, req);
fuse_request_send_nowait_locked(fc, req);
spin_unlock(&fc->lock);
} else {
req->out.h.error = -ENOTCONN;
@ -426,16 +427,16 @@ static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
}
}
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
req->isreply = 0;
request_send_nowait(fc, req);
fuse_request_send_nowait(fc, req);
}
void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
req->isreply = 1;
request_send_nowait(fc, req);
fuse_request_send_nowait(fc, req);
}
/*
@ -443,10 +444,11 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
*
* fc->connected must have been checked previously
*/
void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req)
void fuse_request_send_background_locked(struct fuse_conn *fc,
struct fuse_req *req)
{
req->isreply = 1;
request_send_nowait_locked(fc, req);
fuse_request_send_nowait_locked(fc, req);
}
/*
@ -539,8 +541,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
BUG_ON(!cs->nr_segs);
cs->seglen = cs->iov[0].iov_len;
cs->addr = (unsigned long) cs->iov[0].iov_base;
cs->iov ++;
cs->nr_segs --;
cs->iov++;
cs->nr_segs--;
}
down_read(&current->mm->mmap_sem);
err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
@ -589,9 +591,11 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
kunmap_atomic(mapaddr, KM_USER1);
}
while (count) {
int err;
if (!cs->len && (err = fuse_copy_fill(cs)))
return err;
if (!cs->len) {
int err = fuse_copy_fill(cs);
if (err)
return err;
}
if (page) {
void *mapaddr = kmap_atomic(page, KM_USER1);
void *buf = mapaddr + offset;
@ -631,9 +635,11 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
while (size) {
int err;
if (!cs->len && (err = fuse_copy_fill(cs)))
return err;
if (!cs->len) {
int err = fuse_copy_fill(cs);
if (err)
return err;
}
fuse_copy_do(cs, &val, &size);
}
return 0;
@ -664,6 +670,8 @@ static int request_pending(struct fuse_conn *fc)
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(&fc->lock)
__acquires(&fc->lock)
{
DECLARE_WAITQUEUE(wait, current);
@ -691,7 +699,7 @@ static void request_wait(struct fuse_conn *fc)
*/
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
const struct iovec *iov, unsigned long nr_segs)
__releases(fc->lock)
__releases(&fc->lock)
{
struct fuse_copy_state cs;
struct fuse_in_header ih;
@ -813,6 +821,34 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
return err;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_poll_wakeup_out outarg;
int err;
if (size != sizeof(outarg))
return -EINVAL;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
return err;
return fuse_notify_poll_wakeup(fc, &outarg);
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
unsigned int size, struct fuse_copy_state *cs)
{
switch (code) {
case FUSE_NOTIFY_POLL:
return fuse_notify_poll(fc, size, cs);
default:
return -EINVAL;
}
}
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
@ -876,9 +912,23 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
err = fuse_copy_one(&cs, &oh, sizeof(oh));
if (err)
goto err_finish;
err = -EINVAL;
if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
oh.len != nbytes)
if (oh.len != nbytes)
goto err_finish;
/*
* Zero oh.unique indicates unsolicited notification message
* and error contains notification code.
*/
if (!oh.unique) {
err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
fuse_copy_finish(&cs);
return err ? err : nbytes;
}
err = -EINVAL;
if (oh.error <= -1000 || oh.error > 0)
goto err_finish;
spin_lock(&fc->lock);
@ -966,6 +1016,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
* This function releases and reacquires fc->lock
*/
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(&fc->lock)
__acquires(&fc->lock)
{
while (!list_empty(head)) {
struct fuse_req *req;
@ -988,7 +1040,8 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
* locked).
*/
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock) __acquires(fc->lock)
__releases(&fc->lock)
__acquires(&fc->lock)
{
while (!list_empty(&fc->io)) {
struct fuse_req *req =
@ -1002,11 +1055,11 @@ static void end_io_requests(struct fuse_conn *fc)
wake_up(&req->waitq);
if (end) {
req->end = NULL;
/* The end function will consume this reference */
__fuse_get_request(req);
spin_unlock(&fc->lock);
wait_event(req->waitq, !req->locked);
end(fc, req);
fuse_put_request(fc, req);
spin_lock(&fc->lock);
}
}
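
The notification dispatch added above is driven entirely from userspace: the daemon writes a message whose header has a zero unique field, and the error field then carries the notification code rather than an errno. A minimal userspace-side sketch of waking up pollers via FUSE_NOTIFY_POLL (an illustration, not part of this patch; fuse_fd and notify_poll_wakeup are hypothetical names for an already-open /dev/fuse descriptor and a daemon helper):

/*
 * Userspace sketch: send the unsolicited FUSE_NOTIFY_POLL message that
 * fuse_dev_write()/fuse_notify() above will decode.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <linux/fuse.h>

static int notify_poll_wakeup(int fuse_fd, uint64_t kh)
{
	struct fuse_out_header oh;
	struct fuse_notify_poll_wakeup_out arg;
	char buf[sizeof(oh) + sizeof(arg)];

	memset(&oh, 0, sizeof(oh));
	oh.unique = 0;			/* zero unique => unsolicited notification */
	oh.error = FUSE_NOTIFY_POLL;	/* notification code, not an errno here */
	oh.len = sizeof(buf);		/* must equal the number of bytes written */

	arg.kh = kh;			/* kernel file handle received in fuse_poll_in */

	memcpy(buf, &oh, sizeof(oh));
	memcpy(buf + sizeof(oh), &arg, sizeof(arg));

	return write(fuse_fd, buf, sizeof(buf)) == (ssize_t)sizeof(buf) ? 0 : -1;
}

fuse_dev_write() accepts this because oh.len matches the number of bytes written and oh.unique is zero, so it routes the message through fuse_notify() instead of looking up a pending request.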

fs/fuse/dir.c

@ -1,6 +1,6 @@
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
@ -189,7 +189,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
parent = dget_parent(entry);
fuse_lookup_init(fc, req, get_node_id(parent->d_inode),
&entry->d_name, &outarg);
request_send(fc, req);
fuse_request_send(fc, req);
dput(parent);
err = req->out.h.error;
fuse_put_request(fc, req);
@ -204,7 +204,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
return 0;
}
spin_lock(&fc->lock);
fi->nlookup ++;
fi->nlookup++;
spin_unlock(&fc->lock);
}
fuse_put_request(fc, forget_req);
@ -283,7 +283,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
attr_version = fuse_get_attr_version(fc);
fuse_lookup_init(fc, req, nodeid, name, outarg);
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
/* Zero nodeid is same as -ENOENT, but with valid timeout */
@ -369,7 +369,7 @@ static void fuse_sync_release(struct fuse_conn *fc, struct fuse_file *ff,
{
fuse_release_fill(ff, nodeid, flags, FUSE_RELEASE);
ff->reserved_req->force = 1;
request_send(fc, ff->reserved_req);
fuse_request_send(fc, ff->reserved_req);
fuse_put_request(fc, ff->reserved_req);
kfree(ff);
}
@ -408,7 +408,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
goto out_put_forget_req;
err = -ENOMEM;
ff = fuse_file_alloc();
ff = fuse_file_alloc(fc);
if (!ff)
goto out_put_request;
@ -432,7 +432,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
req->out.args[0].value = &outentry;
req->out.args[1].size = sizeof(outopen);
req->out.args[1].value = &outopen;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
if (err) {
if (err == -ENOSYS)
@ -502,7 +502,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
else
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err)
@ -631,15 +631,17 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
req->in.numargs = 1;
req->in.args[0].size = entry->d_name.len + 1;
req->in.args[0].value = entry->d_name.name;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!err) {
struct inode *inode = entry->d_inode;
/* Set nlink to zero so the inode can be cleared, if
the inode does have more links this will be
discovered at the next lookup/getattr */
/*
* Set nlink to zero so the inode can be cleared, if the inode
* does have more links this will be discovered at the next
* lookup/getattr.
*/
clear_nlink(inode);
fuse_invalidate_attr(inode);
fuse_invalidate_attr(dir);
@ -662,7 +664,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
req->in.numargs = 1;
req->in.args[0].size = entry->d_name.len + 1;
req->in.args[0].value = entry->d_name.name;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!err) {
@ -695,7 +697,7 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
req->in.args[1].value = oldent->d_name.name;
req->in.args[2].size = newent->d_name.len + 1;
req->in.args[2].value = newent->d_name.name;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!err) {
@ -811,7 +813,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
else
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!err) {
@ -911,7 +913,7 @@ static int fuse_access(struct inode *inode, int mask)
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err == -ENOSYS) {
@ -1033,7 +1035,7 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
req->num_pages = 1;
req->pages[0] = page;
fuse_read_fill(req, file, inode, file->f_pos, PAGE_SIZE, FUSE_READDIR);
request_send(fc, req);
fuse_request_send(fc, req);
nbytes = req->out.args[0].size;
err = req->out.h.error;
fuse_put_request(fc, req);
@ -1067,7 +1069,7 @@ static char *read_link(struct dentry *dentry)
req->out.numargs = 1;
req->out.args[0].size = PAGE_SIZE - 1;
req->out.args[0].value = link;
request_send(fc, req);
fuse_request_send(fc, req);
if (req->out.h.error) {
free_page((unsigned long) link);
link = ERR_PTR(req->out.h.error);
@ -1273,7 +1275,7 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
else
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err) {
@ -1367,7 +1369,7 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
req->in.args[1].value = name;
req->in.args[2].size = size;
req->in.args[2].value = value;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err == -ENOSYS) {
@ -1413,7 +1415,7 @@ static ssize_t fuse_getxattr(struct dentry *entry, const char *name,
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
}
request_send(fc, req);
fuse_request_send(fc, req);
ret = req->out.h.error;
if (!ret)
ret = size ? req->out.args[0].size : outarg.size;
@ -1463,7 +1465,7 @@ static ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size)
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
}
request_send(fc, req);
fuse_request_send(fc, req);
ret = req->out.h.error;
if (!ret)
ret = size ? req->out.args[0].size : outarg.size;
@ -1496,7 +1498,7 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
req->in.numargs = 1;
req->in.args[0].size = strlen(name) + 1;
req->in.args[0].value = name;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err == -ENOSYS) {

fs/fuse/file.c

@ -1,6 +1,6 @@
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
@ -39,14 +39,14 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
req->out.numargs = 1;
req->out.args[0].size = sizeof(*outargp);
req->out.args[0].value = outargp;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
return err;
}
struct fuse_file *fuse_file_alloc(void)
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
struct fuse_file *ff;
ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
@ -58,7 +58,12 @@ struct fuse_file *fuse_file_alloc(void)
} else {
INIT_LIST_HEAD(&ff->write_entry);
atomic_set(&ff->count, 0);
spin_lock(&fc->lock);
ff->kh = ++fc->khctr;
spin_unlock(&fc->lock);
}
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
}
return ff;
}
@ -79,7 +84,6 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
dput(req->misc.release.dentry);
mntput(req->misc.release.vfsmount);
fuse_put_request(fc, req);
}
static void fuse_file_put(struct fuse_file *ff)
@ -89,7 +93,7 @@ static void fuse_file_put(struct fuse_file *ff)
struct inode *inode = req->misc.release.dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
req->end = fuse_release_end;
request_send_background(fc, req);
fuse_request_send_background(fc, req);
kfree(ff);
}
}
@ -109,6 +113,7 @@ void fuse_finish_open(struct inode *inode, struct file *file,
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_open_out outarg;
struct fuse_file *ff;
int err;
@ -121,7 +126,7 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
if (err)
return err;
ff = fuse_file_alloc();
ff = fuse_file_alloc(fc);
if (!ff)
return -ENOMEM;
@ -167,7 +172,11 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir)
spin_lock(&fc->lock);
list_del(&ff->write_entry);
if (!RB_EMPTY_NODE(&ff->polled_node))
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
wake_up_interruptible_sync(&ff->poll_wait);
/*
* Normally this will send the RELEASE request,
* however if some asynchronous READ or WRITE requests
@ -280,7 +289,7 @@ static int fuse_flush(struct file *file, fl_owner_t id)
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
req->force = 1;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err == -ENOSYS) {
@ -344,7 +353,7 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err == -ENOSYS) {
@ -396,7 +405,7 @@ static size_t fuse_send_read(struct fuse_req *req, struct file *file,
inarg->read_flags |= FUSE_READ_LOCKOWNER;
inarg->lock_owner = fuse_lock_owner_id(fc, owner);
}
request_send(fc, req);
fuse_request_send(fc, req);
return req->out.args[0].size;
}
@ -493,7 +502,6 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
}
if (req->ff)
fuse_file_put(req->ff);
fuse_put_request(fc, req);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file,
@ -509,10 +517,11 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file,
struct fuse_file *ff = file->private_data;
req->ff = fuse_file_get(ff);
req->end = fuse_readpages_end;
request_send_background(fc, req);
fuse_request_send_background(fc, req);
} else {
request_send(fc, req);
fuse_request_send(fc, req);
fuse_readpages_end(fc, req);
fuse_put_request(fc, req);
}
}
@ -543,7 +552,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
}
}
req->pages[req->num_pages] = page;
req->num_pages ++;
req->num_pages++;
return 0;
}
@ -636,7 +645,7 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
inarg->lock_owner = fuse_lock_owner_id(fc, owner);
}
request_send(fc, req);
fuse_request_send(fc, req);
return req->misc.write.out.size;
}
@ -1042,7 +1051,6 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
__free_page(req->pages[0]);
fuse_file_put(req->ff);
fuse_put_request(fc, req);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@ -1060,6 +1068,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
__releases(&fc->lock)
__acquires(&fc->lock)
{
struct fuse_inode *fi = get_fuse_inode(req->inode);
loff_t size = i_size_read(req->inode);
@ -1079,13 +1089,14 @@ static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
req->in.args[1].size = inarg->size;
fi->writectr++;
request_send_background_locked(fc, req);
fuse_request_send_background_locked(fc, req);
return;
out_free:
fuse_writepage_finish(fc, req);
spin_unlock(&fc->lock);
fuse_writepage_free(fc, req);
fuse_put_request(fc, req);
spin_lock(&fc->lock);
}
@ -1096,6 +1107,8 @@ static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
* Called with fc->lock
*/
void fuse_flush_writepages(struct inode *inode)
__releases(&fc->lock)
__acquires(&fc->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
@ -1325,7 +1338,7 @@ static int fuse_getlk(struct file *file, struct file_lock *fl)
req->out.numargs = 1;
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!err)
@ -1357,7 +1370,7 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
return PTR_ERR(req);
fuse_lk_fill(req, file, fl, opcode, pid, flock);
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
/* locking is restartable */
if (err == -EINTR)
@ -1433,7 +1446,7 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
req->out.numargs = 1;
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err == -ENOSYS)
@ -1470,6 +1483,406 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
return retval;
}
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
unsigned int nr_segs, size_t bytes, bool to_user)
{
struct iov_iter ii;
int page_idx = 0;
if (!bytes)
return 0;
iov_iter_init(&ii, iov, nr_segs, bytes, 0);
while (iov_iter_count(&ii)) {
struct page *page = pages[page_idx++];
size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
void *kaddr, *map;
kaddr = map = kmap(page);
while (todo) {
char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
size_t iov_len = ii.iov->iov_len - ii.iov_offset;
size_t copy = min(todo, iov_len);
size_t left;
if (!to_user)
left = copy_from_user(kaddr, uaddr, copy);
else
left = copy_to_user(uaddr, kaddr, copy);
if (unlikely(left))
return -EFAULT;
iov_iter_advance(&ii, copy);
todo -= copy;
kaddr += copy;
}
kunmap(map);
}
return 0;
}
/*
* For ioctls, there is no generic way to determine how much memory
* needs to be read and/or written. Furthermore, ioctls are allowed
* to dereference the passed pointer, so the parameter requires deep
* copying but FUSE has no idea whatsoever about what to copy in or
* out.
*
* This is solved by allowing FUSE server to retry ioctl with
* necessary in/out iovecs. Let's assume the ioctl implementation
* needs to read in the following structure.
*
* struct a {
* char *buf;
* size_t buflen;
* }
*
* On the first callout to FUSE server, inarg->in_size and
* inarg->out_size will be NULL; then, the server completes the ioctl
* with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
* the actual iov array to
*
* { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } }
*
* which tells FUSE to copy in the requested area and retry the ioctl.
* On the second round, the server has access to the structure and
* from that it can tell what to look for next, so on the invocation,
* it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
*
* { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) },
* { .iov_base = a.buf, .iov_len = a.buflen } }
*
* FUSE will copy both struct a and the pointed buffer from the
* process doing the ioctl and retry ioctl with both struct a and the
* buffer.
*
* This time, FUSE server has everything it needs and completes ioctl
* without FUSE_IOCTL_RETRY which finishes the ioctl call.
*
* Copying data out works the same way.
*
* Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
* automatically initializes in and out iovs by decoding @cmd with
* _IOC_* macros and the server is not allowed to request RETRY. This
* limits ioctl data transfers to well-formed ioctls and is the forced
* behavior for all FUSE servers.
*/
static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, unsigned int flags)
{
struct inode *inode = file->f_dentry->d_inode;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_ioctl_in inarg = {
.fh = ff->fh,
.cmd = cmd,
.arg = arg,
.flags = flags
};
struct fuse_ioctl_out outarg;
struct fuse_req *req = NULL;
struct page **pages = NULL;
struct page *iov_page = NULL;
struct iovec *in_iov = NULL, *out_iov = NULL;
unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
size_t in_size, out_size, transferred;
int err;
/* assume all the iovs returned by client always fits in a page */
BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
if (!fuse_allow_task(fc, current))
return -EACCES;
err = -EIO;
if (is_bad_inode(inode))
goto out;
err = -ENOMEM;
pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
iov_page = alloc_page(GFP_KERNEL);
if (!pages || !iov_page)
goto out;
/*
* If restricted, initialize IO parameters as encoded in @cmd.
* RETRY from server is not allowed.
*/
if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
struct iovec *iov = page_address(iov_page);
iov->iov_base = (void __user *)arg;
iov->iov_len = _IOC_SIZE(cmd);
if (_IOC_DIR(cmd) & _IOC_WRITE) {
in_iov = iov;
in_iovs = 1;
}
if (_IOC_DIR(cmd) & _IOC_READ) {
out_iov = iov;
out_iovs = 1;
}
}
retry:
inarg.in_size = in_size = iov_length(in_iov, in_iovs);
inarg.out_size = out_size = iov_length(out_iov, out_iovs);
/*
* Out data can be used either for actual out data or iovs,
* make sure there always is at least one page.
*/
out_size = max_t(size_t, out_size, PAGE_SIZE);
max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
/* make sure there are enough buffer pages and init request with them */
err = -ENOMEM;
if (max_pages > FUSE_MAX_PAGES_PER_REQ)
goto out;
while (num_pages < max_pages) {
pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
if (!pages[num_pages])
goto out;
num_pages++;
}
req = fuse_get_req(fc);
if (IS_ERR(req)) {
err = PTR_ERR(req);
req = NULL;
goto out;
}
memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
req->num_pages = num_pages;
/* okay, let's send it to the client */
req->in.h.opcode = FUSE_IOCTL;
req->in.h.nodeid = get_node_id(inode);
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
if (in_size) {
req->in.numargs++;
req->in.args[1].size = in_size;
req->in.argpages = 1;
err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
false);
if (err)
goto out;
}
req->out.numargs = 2;
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
req->out.args[1].size = out_size;
req->out.argpages = 1;
req->out.argvar = 1;
fuse_request_send(fc, req);
err = req->out.h.error;
transferred = req->out.args[1].size;
fuse_put_request(fc, req);
req = NULL;
if (err)
goto out;
/* did it ask for retry? */
if (outarg.flags & FUSE_IOCTL_RETRY) {
char *vaddr;
/* no retry if in restricted mode */
err = -EIO;
if (!(flags & FUSE_IOCTL_UNRESTRICTED))
goto out;
in_iovs = outarg.in_iovs;
out_iovs = outarg.out_iovs;
/*
* Make sure things are in boundary, separate checks
* are to protect against overflow.
*/
err = -ENOMEM;
if (in_iovs > FUSE_IOCTL_MAX_IOV ||
out_iovs > FUSE_IOCTL_MAX_IOV ||
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
err = -EIO;
if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
goto out;
/* okay, copy in iovs and retry */
vaddr = kmap_atomic(pages[0], KM_USER0);
memcpy(page_address(iov_page), vaddr, transferred);
kunmap_atomic(vaddr, KM_USER0);
in_iov = page_address(iov_page);
out_iov = in_iov + in_iovs;
goto retry;
}
err = -EIO;
if (transferred > inarg.out_size)
goto out;
err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
out:
if (req)
fuse_put_request(fc, req);
if (iov_page)
__free_page(iov_page);
while (num_pages)
__free_page(pages[--num_pages]);
kfree(pages);
return err ? err : outarg.result;
}
static long fuse_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return fuse_file_do_ioctl(file, cmd, arg, 0);
}
static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
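
To make the FUSE_IOCTL_RETRY handshake documented above fuse_file_do_ioctl() concrete, here is a hedged server-side sketch of the two retry replies for the "struct a" example from that comment. The struct and helper names are hypothetical; the iovec array is returned as the reply's data payload, and the kernel only honours a retry when the original request carried FUSE_IOCTL_UNRESTRICTED and the payload size equals (in_iovs + out_iovs) * sizeof(struct iovec).

/*
 * Hypothetical FUSE server (userspace) filling the two retry replies for
 * the 'struct a' example in the comment above fuse_file_do_ioctl().
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/uio.h>
#include <linux/fuse.h>

struct a {
	char *buf;
	size_t buflen;
};

/* First pass: ask the kernel to copy in struct a itself. */
static unsigned first_retry(struct fuse_ioctl_out *out, struct iovec *iov,
			    uint64_t arg)
{
	out->result = 0;
	out->flags = FUSE_IOCTL_RETRY;
	out->in_iovs = 1;
	out->out_iovs = 0;
	iov[0].iov_base = (void *)(uintptr_t)arg;
	iov[0].iov_len = sizeof(struct a);
	return out->in_iovs + out->out_iovs;	/* iovecs placed in the payload */
}

/* Second pass: struct a is now available, also ask for the buffer it points to. */
static unsigned second_retry(struct fuse_ioctl_out *out, struct iovec *iov,
			     uint64_t arg, const struct a *a)
{
	out->result = 0;
	out->flags = FUSE_IOCTL_RETRY;
	out->in_iovs = 2;
	out->out_iovs = 0;
	iov[0].iov_base = (void *)(uintptr_t)arg;
	iov[0].iov_len = sizeof(*a);
	iov[1].iov_base = a->buf;
	iov[1].iov_len = a->buflen;
	return out->in_iovs + out->out_iovs;
}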
/*
* All files which have been polled are linked to RB tree
* fuse_conn->polled_files which is indexed by kh. Walk the tree and
* find the matching one.
*/
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
struct rb_node **parent_out)
{
struct rb_node **link = &fc->polled_files.rb_node;
struct rb_node *last = NULL;
while (*link) {
struct fuse_file *ff;
last = *link;
ff = rb_entry(last, struct fuse_file, polled_node);
if (kh < ff->kh)
link = &last->rb_left;
else if (kh > ff->kh)
link = &last->rb_right;
else
return link;
}
if (parent_out)
*parent_out = last;
return link;
}
/*
* The file is about to be polled. Make sure it's on the polled_files
* RB tree. Note that files once added to the polled_files tree are
* not removed before the file is released. This is because a file
* polled once is likely to be polled again.
*/
static void fuse_register_polled_file(struct fuse_conn *fc,
struct fuse_file *ff)
{
spin_lock(&fc->lock);
if (RB_EMPTY_NODE(&ff->polled_node)) {
struct rb_node **link, *parent;
link = fuse_find_polled_node(fc, ff->kh, &parent);
BUG_ON(*link);
rb_link_node(&ff->polled_node, parent, link);
rb_insert_color(&ff->polled_node, &fc->polled_files);
}
spin_unlock(&fc->lock);
}
static unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
struct inode *inode = file->f_dentry->d_inode;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
struct fuse_poll_out outarg;
struct fuse_req *req;
int err;
if (fc->no_poll)
return DEFAULT_POLLMASK;
poll_wait(file, &ff->poll_wait, wait);
/*
* Ask for notification iff there's someone waiting for it.
* The client may ignore the flag and always notify.
*/
if (waitqueue_active(&ff->poll_wait)) {
inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
fuse_register_polled_file(fc, ff);
}
req = fuse_get_req(fc);
if (IS_ERR(req))
return PTR_ERR(req);
req->in.h.opcode = FUSE_POLL;
req->in.h.nodeid = get_node_id(inode);
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
req->out.numargs = 1;
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!err)
return outarg.revents;
if (err == -ENOSYS) {
fc->no_poll = 1;
return DEFAULT_POLLMASK;
}
return POLLERR;
}
/*
* This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
* wakes up the poll waiters.
*/
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
struct fuse_notify_poll_wakeup_out *outarg)
{
u64 kh = outarg->kh;
struct rb_node **link;
spin_lock(&fc->lock);
link = fuse_find_polled_node(fc, kh, NULL);
if (*link) {
struct fuse_file *ff;
ff = rb_entry(*link, struct fuse_file, polled_node);
wake_up_interruptible_sync(&ff->poll_wait);
}
spin_unlock(&fc->lock);
return 0;
}
static const struct file_operations fuse_file_operations = {
.llseek = fuse_file_llseek,
.read = do_sync_read,
@ -1484,6 +1897,9 @@ static const struct file_operations fuse_file_operations = {
.lock = fuse_file_lock,
.flock = fuse_file_flock,
.splice_read = generic_file_splice_read,
.unlocked_ioctl = fuse_file_ioctl,
.compat_ioctl = fuse_file_compat_ioctl,
.poll = fuse_file_poll,
};
static const struct file_operations fuse_direct_io_file_operations = {
@ -1496,6 +1912,9 @@ static const struct file_operations fuse_direct_io_file_operations = {
.fsync = fuse_fsync,
.lock = fuse_file_lock,
.flock = fuse_file_flock,
.unlocked_ioctl = fuse_file_ioctl,
.compat_ioctl = fuse_file_compat_ioctl,
.poll = fuse_file_poll,
/* no mmap and splice_read */
};
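
fuse_file_poll() above issues a FUSE_POLL request and expects a fuse_poll_out reply; when FUSE_POLL_SCHEDULE_NOTIFY is set, the server should remember the kernel handle so a later readiness change can be announced with the FUSE_NOTIFY_POLL message sketched after the dev.c diff. A hedged userspace-side handler (remember_poll_handle and send_reply are assumed daemon helpers, not part of this patch or of libfuse):

/*
 * Hypothetical FUSE server handler for the new FUSE_POLL request.
 */
#include <poll.h>
#include <stddef.h>
#include <stdint.h>
#include <linux/fuse.h>

/* assumed daemon helpers, declared here only to make the sketch self-contained */
void remember_poll_handle(uint64_t kh);
void send_reply(uint64_t unique, int error, const void *arg, size_t argsize);

static void handle_fuse_poll(const struct fuse_in_header *in,
			     const struct fuse_poll_in *arg)
{
	struct fuse_poll_out out = { 0 };

	/* Report whatever is currently ready on the backing object. */
	out.revents = POLLIN;	/* placeholder: compute real readiness here */

	/*
	 * The kernel sets FUSE_POLL_SCHEDULE_NOTIFY only when someone is
	 * actually waiting; remember arg->kh so a later state change can be
	 * announced with FUSE_NOTIFY_POLL.
	 */
	if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY)
		remember_poll_handle(arg->kh);

	send_reply(in->unique, 0 /* no error */, &out, sizeof(out));
}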

fs/fuse/fuse_i.h

@ -1,6 +1,6 @@
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
@ -19,6 +19,8 @@
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/poll.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@ -100,6 +102,9 @@ struct fuse_file {
/** Request reserved for flush and release */
struct fuse_req *reserved_req;
/** Kernel file handle guaranteed to be unique */
u64 kh;
/** File handle used by userspace */
u64 fh;
@ -108,6 +113,12 @@ struct fuse_file {
/** Entry on inode's write_files list */
struct list_head write_entry;
/** RB node to be linked on fuse_conn->polled_files */
struct rb_node polled_node;
/** Wait queue head for poll */
wait_queue_head_t poll_wait;
};
/** One input argument of a request */
@ -322,6 +333,12 @@ struct fuse_conn {
/** The list of requests under I/O */
struct list_head io;
/** The next unique kernel file handle */
u64 khctr;
/** rbtree of fuse_files waiting for poll events indexed by ph */
struct rb_root polled_files;
/** Number of requests currently in the background */
unsigned num_background;
@ -355,19 +372,19 @@ struct fuse_conn {
/** Connection failed (version mismatch). Cannot race with
setting other bitfields since it is only set once in INIT
reply, before any other request, and never cleared */
unsigned conn_error : 1;
unsigned conn_error:1;
/** Connection successful. Only set in INIT */
unsigned conn_init : 1;
unsigned conn_init:1;
/** Do readpages asynchronously? Only set in INIT */
unsigned async_read : 1;
unsigned async_read:1;
/** Do not send separate SETATTR request before open(O_TRUNC) */
unsigned atomic_o_trunc : 1;
unsigned atomic_o_trunc:1;
/** Filesystem supports NFS exporting. Only set in INIT */
unsigned export_support : 1;
unsigned export_support:1;
/*
* The following bitfields are only for optimization purposes
@ -375,43 +392,46 @@ struct fuse_conn {
*/
/** Is fsync not implemented by fs? */
unsigned no_fsync : 1;
unsigned no_fsync:1;
/** Is fsyncdir not implemented by fs? */
unsigned no_fsyncdir : 1;
unsigned no_fsyncdir:1;
/** Is flush not implemented by fs? */
unsigned no_flush : 1;
unsigned no_flush:1;
/** Is setxattr not implemented by fs? */
unsigned no_setxattr : 1;
unsigned no_setxattr:1;
/** Is getxattr not implemented by fs? */
unsigned no_getxattr : 1;
unsigned no_getxattr:1;
/** Is listxattr not implemented by fs? */
unsigned no_listxattr : 1;
unsigned no_listxattr:1;
/** Is removexattr not implemented by fs? */
unsigned no_removexattr : 1;
unsigned no_removexattr:1;
/** Are file locking primitives not implemented by fs? */
unsigned no_lock : 1;
unsigned no_lock:1;
/** Is access not implemented by fs? */
unsigned no_access : 1;
unsigned no_access:1;
/** Is create not implemented by fs? */
unsigned no_create : 1;
unsigned no_create:1;
/** Is interrupt not implemented by fs? */
unsigned no_interrupt : 1;
unsigned no_interrupt:1;
/** Is bmap not implemented by fs? */
unsigned no_bmap : 1;
unsigned no_bmap:1;
/** Is poll not implemented by fs? */
unsigned no_poll:1;
/** Do multi-page cached writes */
unsigned big_writes : 1;
unsigned big_writes:1;
/** The number of requests waiting for completion */
atomic_t num_waiting;
@ -445,6 +465,9 @@ struct fuse_conn {
/** Version counter for attribute changes */
u64 attr_version;
/** Called on final put */
void (*release)(struct fuse_conn *);
};
static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb)
@ -499,7 +522,7 @@ void fuse_read_fill(struct fuse_req *req, struct file *file,
*/
int fuse_open_common(struct inode *inode, struct file *file, int isdir);
struct fuse_file *fuse_file_alloc(void);
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc);
void fuse_file_free(struct fuse_file *ff);
void fuse_finish_open(struct inode *inode, struct file *file,
struct fuse_file *ff, struct fuse_open_out *outarg);
@ -518,6 +541,12 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir);
int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
int isdir);
/**
* Notify poll wakeup
*/
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
struct fuse_notify_poll_wakeup_out *outarg);
/**
* Initialize file operations on a regular file
*/
@ -593,19 +622,20 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req);
/**
* Send a request (synchronous)
*/
void request_send(struct fuse_conn *fc, struct fuse_req *req);
void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req);
/**
* Send a request with no reply
*/
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
/**
* Send a request in the background
*/
void request_send_background(struct fuse_conn *fc, struct fuse_req *req);
void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req);
void fuse_request_send_background_locked(struct fuse_conn *fc,
struct fuse_req *req);
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc);
@ -622,6 +652,11 @@ void fuse_invalidate_entry_cache(struct dentry *entry);
*/
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
/**
* Initialize fuse_conn
*/
int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb);
/**
* Release reference to fuse_conn
*/

fs/fuse/inode.c

@ -1,6 +1,6 @@
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
@ -37,10 +37,10 @@ struct fuse_mount_data {
unsigned rootmode;
unsigned user_id;
unsigned group_id;
unsigned fd_present : 1;
unsigned rootmode_present : 1;
unsigned user_id_present : 1;
unsigned group_id_present : 1;
unsigned fd_present:1;
unsigned rootmode_present:1;
unsigned user_id_present:1;
unsigned group_id_present:1;
unsigned flags;
unsigned max_read;
unsigned blksize;
@ -94,7 +94,7 @@ void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
req->in.numargs = 1;
req->in.args[0].size = sizeof(struct fuse_forget_in);
req->in.args[0].value = inarg;
request_send_noreply(fc, req);
fuse_request_send_noreply(fc, req);
}
static void fuse_clear_inode(struct inode *inode)
@ -250,7 +250,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
fi->nlookup ++;
fi->nlookup++;
spin_unlock(&fc->lock);
fuse_change_attributes(inode, attr, attr_valid, attr_version);
@ -269,7 +269,7 @@ static void fuse_send_destroy(struct fuse_conn *fc)
fc->destroy_req = NULL;
req->in.h.opcode = FUSE_DESTROY;
req->force = 1;
request_send(fc, req);
fuse_request_send(fc, req);
fuse_put_request(fc, req);
}
}
@ -334,7 +334,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
req->out.args[0].size =
fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
req->out.args[0].value = &outarg;
request_send(fc, req);
fuse_request_send(fc, req);
err = req->out.h.error;
if (!err)
convert_fuse_statfs(buf, &outarg.st);
@ -462,68 +462,69 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
return 0;
}
static struct fuse_conn *new_conn(struct super_block *sb)
int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb)
{
struct fuse_conn *fc;
int err;
fc = kzalloc(sizeof(*fc), GFP_KERNEL);
if (fc) {
spin_lock_init(&fc->lock);
mutex_init(&fc->inst_mutex);
atomic_set(&fc->count, 1);
init_waitqueue_head(&fc->waitq);
init_waitqueue_head(&fc->blocked_waitq);
init_waitqueue_head(&fc->reserved_req_waitq);
INIT_LIST_HEAD(&fc->pending);
INIT_LIST_HEAD(&fc->processing);
INIT_LIST_HEAD(&fc->io);
INIT_LIST_HEAD(&fc->interrupts);
INIT_LIST_HEAD(&fc->bg_queue);
atomic_set(&fc->num_waiting, 0);
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does it's own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
fc->dev = sb->s_dev;
err = bdi_init(&fc->bdi);
if (err)
goto error_kfree;
if (sb->s_bdev) {
err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
MAJOR(fc->dev), MINOR(fc->dev));
} else {
err = bdi_register_dev(&fc->bdi, fc->dev);
}
if (err)
goto error_bdi_destroy;
/*
* For a single fuse filesystem use max 1% of dirty +
* writeback threshold.
*
* This gives about 1M of write buffer for memory maps on a
* machine with 1G and 10% dirty_ratio, which should be more
* than enough.
*
* Privileged users can raise it by writing to
*
* /sys/class/bdi/<bdi>/max_ratio
*/
bdi_set_max_ratio(&fc->bdi, 1);
fc->reqctr = 0;
fc->blocked = 1;
fc->attr_version = 1;
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
mutex_init(&fc->inst_mutex);
atomic_set(&fc->count, 1);
init_waitqueue_head(&fc->waitq);
init_waitqueue_head(&fc->blocked_waitq);
init_waitqueue_head(&fc->reserved_req_waitq);
INIT_LIST_HEAD(&fc->pending);
INIT_LIST_HEAD(&fc->processing);
INIT_LIST_HEAD(&fc->io);
INIT_LIST_HEAD(&fc->interrupts);
INIT_LIST_HEAD(&fc->bg_queue);
INIT_LIST_HEAD(&fc->entry);
atomic_set(&fc->num_waiting, 0);
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does it's own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
fc->khctr = 0;
fc->polled_files = RB_ROOT;
fc->dev = sb->s_dev;
err = bdi_init(&fc->bdi);
if (err)
goto error_mutex_destroy;
if (sb->s_bdev) {
err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
MAJOR(fc->dev), MINOR(fc->dev));
} else {
err = bdi_register_dev(&fc->bdi, fc->dev);
}
return fc;
if (err)
goto error_bdi_destroy;
/*
* For a single fuse filesystem use max 1% of dirty +
* writeback threshold.
*
* This gives about 1M of write buffer for memory maps on a
* machine with 1G and 10% dirty_ratio, which should be more
* than enough.
*
* Privileged users can raise it by writing to
*
* /sys/class/bdi/<bdi>/max_ratio
*/
bdi_set_max_ratio(&fc->bdi, 1);
fc->reqctr = 0;
fc->blocked = 1;
fc->attr_version = 1;
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
error_bdi_destroy:
return 0;
error_bdi_destroy:
bdi_destroy(&fc->bdi);
error_kfree:
error_mutex_destroy:
mutex_destroy(&fc->inst_mutex);
kfree(fc);
return NULL;
return err;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
void fuse_conn_put(struct fuse_conn *fc)
{
@ -532,7 +533,7 @@ void fuse_conn_put(struct fuse_conn *fc)
fuse_request_free(fc->destroy_req);
mutex_destroy(&fc->inst_mutex);
bdi_destroy(&fc->bdi);
kfree(fc);
fc->release(fc);
}
}
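
fuse_conn_put() now hands the final free to the new fc->release() callback instead of calling kfree() directly; fuse_fill_super() below plugs in fuse_free_conn() for the ordinary mount case. A hedged sketch of how another in-kernel user of the exported fuse_conn_init() could embed the connection in a larger object and release it as a whole (hypothetical names, not in this patch):

/*
 * Hypothetical second user of fuse_conn_init()/->release(): embed the
 * fuse_conn in a larger per-frontend object and free that object on the
 * final put.  Only the pattern is shown; the names are made up.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include "fuse_i.h"

struct example_frontend {
	struct fuse_conn fc;
	/* frontend-private state would follow */
};

static void example_frontend_release(struct fuse_conn *fc)
{
	kfree(container_of(fc, struct example_frontend, fc));
}

static int example_frontend_setup(struct example_frontend *fe,
				  struct super_block *sb)
{
	int err = fuse_conn_init(&fe->fc, sb);
	if (err)
		return err;
	fe->fc.release = example_frontend_release;	/* called by fuse_conn_put() */
	return 0;
}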
@ -542,7 +543,7 @@ struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
return fc;
}
static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
struct fuse_attr attr;
memset(&attr, 0, sizeof(attr));
@ -553,8 +554,7 @@ static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
return fuse_iget(sb, 1, 0, &attr, 0, 0);
}
struct fuse_inode_handle
{
struct fuse_inode_handle {
u64 nodeid;
u32 generation;
};
@ -761,7 +761,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->max_write = max_t(unsigned, 4096, fc->max_write);
fc->conn_init = 1;
}
fuse_put_request(fc, req);
fc->blocked = 0;
wake_up_all(&fc->blocked_waitq);
}
@ -787,7 +786,12 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
req->out.args[0].size = sizeof(struct fuse_init_out);
req->out.args[0].value = &req->misc.init_out;
req->end = process_init_reply;
request_send_background(fc, req);
fuse_request_send_background(fc, req);
}
static void fuse_free_conn(struct fuse_conn *fc)
{
kfree(fc);
}
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
@ -828,10 +832,17 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (file->f_op != &fuse_dev_operations)
return -EINVAL;
fc = new_conn(sb);
fc = kmalloc(sizeof(*fc), GFP_KERNEL);
if (!fc)
return -ENOMEM;
err = fuse_conn_init(fc, sb);
if (err) {
kfree(fc);
return err;
}
fc->release = fuse_free_conn;
fc->flags = d.flags;
fc->user_id = d.user_id;
fc->group_id = d.group_id;
@ -841,7 +852,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = fc;
err = -ENOMEM;
root = get_root_inode(sb, d.rootmode);
root = fuse_get_root_inode(sb, d.rootmode);
if (!root)
goto err;
@ -952,7 +963,7 @@ static inline void unregister_fuseblk(void)
static void fuse_inode_init_once(void *foo)
{
struct inode * inode = foo;
struct inode *inode = foo;
inode_init_once(inode);
}
@ -1031,7 +1042,7 @@ static int __init fuse_init(void)
{
int res;
printk("fuse init (API version %i.%i)\n",
printk(KERN_INFO "fuse init (API version %i.%i)\n",
FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
INIT_LIST_HEAD(&fuse_conn_list);

include/linux/fuse.h

@ -1,6 +1,6 @@
/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
@ -20,29 +20,27 @@
*
* 7.10
* - add nonseekable open flag
*
* 7.11
* - add IOCTL message
* - add unsolicited notification support
* - add POLL message and NOTIFY_POLL notification
*/
#ifndef _LINUX_FUSE_H
#define _LINUX_FUSE_H
#include <asm/types.h>
#include <linux/major.h>
#include <linux/types.h>
/** Version number of this interface */
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
#define FUSE_KERNEL_MINOR_VERSION 10
#define FUSE_KERNEL_MINOR_VERSION 11
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
/** The major number of the fuse character device */
#define FUSE_MAJOR MISC_MAJOR
/** The minor number of the fuse character device */
#define FUSE_MINOR 229
/* Make sure all structures are padded to 64bit boundary, so 32bit
userspace works under 64bit kernels */
@ -151,6 +149,28 @@ struct fuse_file_lock {
*/
#define FUSE_READ_LOCKOWNER (1 << 1)
/**
* Ioctl flags
*
* FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine
* FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed
* FUSE_IOCTL_RETRY: retry with new iovecs
*
* FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs
*/
#define FUSE_IOCTL_COMPAT (1 << 0)
#define FUSE_IOCTL_UNRESTRICTED (1 << 1)
#define FUSE_IOCTL_RETRY (1 << 2)
#define FUSE_IOCTL_MAX_IOV 256
/**
* Poll flags
*
* FUSE_POLL_SCHEDULE_NOTIFY: request poll notify
*/
#define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0)
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
@ -188,6 +208,13 @@ enum fuse_opcode {
FUSE_INTERRUPT = 36,
FUSE_BMAP = 37,
FUSE_DESTROY = 38,
FUSE_IOCTL = 39,
FUSE_POLL = 40,
};
enum fuse_notify_code {
FUSE_NOTIFY_POLL = 1,
FUSE_NOTIFY_CODE_MAX,
};
/* The read buffer is required to be at least 8k, but may be much larger */
@ -388,6 +415,38 @@ struct fuse_bmap_out {
__u64 block;
};
struct fuse_ioctl_in {
__u64 fh;
__u32 flags;
__u32 cmd;
__u64 arg;
__u32 in_size;
__u32 out_size;
};
struct fuse_ioctl_out {
__s32 result;
__u32 flags;
__u32 in_iovs;
__u32 out_iovs;
};
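
When FUSE_IOCTL_UNRESTRICTED is not set, fuse_file_do_ioctl() builds these iovecs itself from the _IOC_* encoding of cmd, so only conventionally defined ioctls get through. An illustrative definition, not taken from this patch:

/*
 * Illustrative (made-up) ioctl that works in the default, restricted mode:
 * _IOC_DIR(cmd) is _IOC_READ and _IOC_SIZE(cmd) is sizeof(struct
 * fuse_example_info), so the kernel sets up one out iovec of that size
 * pointing at 'arg' without any server-requested retry.
 */
#include <linux/ioctl.h>
#include <linux/types.h>

struct fuse_example_info {
	__u64 blocks;
	__u32 flags;
	__u32 padding;
};

#define FUSE_EXAMPLE_GET_INFO	_IOR('E', 0x01, struct fuse_example_info)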
struct fuse_poll_in {
__u64 fh;
__u64 kh;
__u32 flags;
__u32 padding;
};
struct fuse_poll_out {
__u32 revents;
__u32 padding;
};
struct fuse_notify_poll_wakeup_out {
__u64 kh;
};
struct fuse_in_header {
__u32 len;
__u32 opcode;

include/linux/miscdevice.h

@ -3,33 +3,33 @@
#include <linux/module.h>
#include <linux/major.h>
#define PSMOUSE_MINOR 1
#define MS_BUSMOUSE_MINOR 2
#define ATIXL_BUSMOUSE_MINOR 3
/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */
#define ATARIMOUSE_MINOR 5
#define SUN_MOUSE_MINOR 6
#define APOLLO_MOUSE_MINOR 7
#define PC110PAD_MINOR 9
/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
#define PSMOUSE_MINOR 1
#define MS_BUSMOUSE_MINOR 2
#define ATIXL_BUSMOUSE_MINOR 3
/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */
#define ATARIMOUSE_MINOR 5
#define SUN_MOUSE_MINOR 6
#define APOLLO_MOUSE_MINOR 7
#define PC110PAD_MINOR 9
/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
#define WATCHDOG_MINOR 130 /* Watchdog timer */
#define TEMP_MINOR 131 /* Temperature Sensor */
#define RTC_MINOR 135
#define RTC_MINOR 135
#define EFI_RTC_MINOR 136 /* EFI Time services */
#define SUN_OPENPROM_MINOR 139
#define SUN_OPENPROM_MINOR 139
#define DMAPI_MINOR 140 /* DMAPI */
#define NVRAM_MINOR 144
#define SGI_MMTIMER 153
#define NVRAM_MINOR 144
#define SGI_MMTIMER 153
#define STORE_QUEUE_MINOR 155
#define I2O_MINOR 166
#define I2O_MINOR 166
#define MICROCODE_MINOR 184
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
#define MISC_DYNAMIC_MINOR 255
#define TUN_MINOR 200
#define HPET_MINOR 228
#define KVM_MINOR 232
#define TUN_MINOR 200
#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
#define MPT_MINOR 220
#define HPET_MINOR 228
#define FUSE_MINOR 229
#define KVM_MINOR 232
#define MISC_DYNAMIC_MINOR 255
struct device;
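
With FUSE_MINOR now defined beside the other misc minors, code that includes <linux/miscdevice.h> can refer to the fuse character device without pulling in <linux/fuse.h>. A hedged sketch of the consumer pattern (illustrative names; the real registration lives in fs/fuse/dev.c):

/*
 * Illustrative consumer of the relocated FUSE_MINOR: a misc device
 * description like the one fs/fuse/dev.c registers with misc_register().
 * 'example_fuse_fops' stands in for the real fuse_dev_operations.
 */
#include <linux/fs.h>
#include <linux/miscdevice.h>

static const struct file_operations example_fuse_fops;

static struct miscdevice example_fuse_miscdevice = {
	.minor	= FUSE_MINOR,
	.name	= "fuse",
	.fops	= &example_fuse_fops,
};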