Merge branch 'cuse' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse

* 'cuse' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
  CUSE: implement CUSE - Character device in Userspace
  fuse: export symbols to be used by CUSE
  fuse: update fuse_conn_init() and separate out fuse_conn_kill()
  fuse: don't use inode in fuse_file_poll
  fuse: don't use inode in fuse_do_ioctl() helper
  fuse: don't use inode in fuse_sync_release()
  fuse: create fuse_do_open() helper for CUSE
  fuse: clean up args in fuse_finish_open() and fuse_release_fill()
  fuse: don't use inode in helpers called by fuse_direct_io()
  fuse: add members to struct fuse_file
  fuse: prepare fuse_direct_io() for CUSE
  fuse: clean up fuse_write_fill()
  fuse: use struct path in release structure
  fuse: misc cleanups
commit c34752bc8b
Linus Torvalds, 2009-06-12 09:31:20 -07:00
9 changed files with 985 additions and 236 deletions

fs/Kconfig

@ -62,6 +62,16 @@ source "fs/autofs/Kconfig"
source "fs/autofs4/Kconfig"
source "fs/fuse/Kconfig"
config CUSE
tristate "Character device in Userpace support"
depends on FUSE_FS
help
This FUSE extension allows character devices to be
implemented in userspace.
If you want to develop or use a userspace character device
based on CUSE, answer Y or M.
config GENERIC_ACL
bool
select FS_POSIX_ACL
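
For context only (not part of this commit): with this entry, CUSE can be enabled alongside FUSE, for instance as modules, in which case the Makefile change below produces a separate cuse.ko that relies on the symbols FUSE exports later in this series. A hypothetical configuration fragment:

CONFIG_FUSE_FS=m
CONFIG_CUSE=m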

fs/fuse/Makefile

@ -3,5 +3,6 @@
#
obj-$(CONFIG_FUSE_FS) += fuse.o
obj-$(CONFIG_CUSE) += cuse.o
fuse-objs := dev.o dir.o file.o inode.o control.o

fs/fuse/cuse.c (new file, 610 lines)

@ -0,0 +1,610 @@
/*
* CUSE: Character device in Userspace
*
* Copyright (C) 2008-2009 SUSE Linux Products GmbH
* Copyright (C) 2008-2009 Tejun Heo <tj@kernel.org>
*
* This file is released under the GPLv2.
*
* CUSE enables character devices to be implemented from userland much
* like FUSE allows filesystems. On initialization /dev/cuse is
* created. By opening the file and replying to the CUSE_INIT request,
* a userland CUSE server can create a character device. After that the
* operation is very similar to FUSE.
*
* A CUSE instance involves the following objects.
*
* cuse_conn : contains fuse_conn and serves as bonding structure
* channel : file handle connected to the userland CUSE server
* cdev : the implemented character device
* dev : generic device for cdev
*
* Note that 'channel' is what 'dev' is in FUSE. As CUSE deals with
* devices, it's called 'channel' to reduce confusion.
*
* channel determines when the character device dies. When channel is
* closed, everything begins to be torn down. The cuse_conn is taken off
* the lookup table, preventing further access from the cdev; the cdev and
* generic device are removed, and the base reference of cuse_conn is
* put.
*
* On each open, the matching cuse_conn is looked up and if found an
* additional reference is taken which is released when the file is
* closed.
*/
#include <linux/fuse.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include "fuse_i.h"
#define CUSE_CONNTBL_LEN 64
struct cuse_conn {
struct list_head list; /* linked on cuse_conntbl */
struct fuse_conn fc; /* fuse connection */
struct cdev *cdev; /* associated character device */
struct device *dev; /* device representing @cdev */
/* init parameters, set once during initialization */
bool unrestricted_ioctl;
};
static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */
static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN];
static struct class *cuse_class;
static struct cuse_conn *fc_to_cc(struct fuse_conn *fc)
{
return container_of(fc, struct cuse_conn, fc);
}
static struct list_head *cuse_conntbl_head(dev_t devt)
{
return &cuse_conntbl[(MAJOR(devt) + MINOR(devt)) % CUSE_CONNTBL_LEN];
}
/**************************************************************************
* CUSE frontend operations
*
* These are file operations for the character device.
*
* On open, CUSE opens a file from the FUSE mnt and stores it to
* private_data of the open file. All other ops call FUSE ops on the
* FUSE file.
*/
static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
loff_t pos = 0;
return fuse_direct_io(file, buf, count, &pos, 0);
}
static ssize_t cuse_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
loff_t pos = 0;
/*
* No locking or generic_write_checks(), the server is
* responsible for locking and sanity checks.
*/
return fuse_direct_io(file, buf, count, &pos, 1);
}
static int cuse_open(struct inode *inode, struct file *file)
{
dev_t devt = inode->i_cdev->dev;
struct cuse_conn *cc = NULL, *pos;
int rc;
/* look up and get the connection */
spin_lock(&cuse_lock);
list_for_each_entry(pos, cuse_conntbl_head(devt), list)
if (pos->dev->devt == devt) {
fuse_conn_get(&pos->fc);
cc = pos;
break;
}
spin_unlock(&cuse_lock);
/* dead? */
if (!cc)
return -ENODEV;
/*
* Generic permission check is already done against the chrdev
* file, proceed to open.
*/
rc = fuse_do_open(&cc->fc, 0, file, 0);
if (rc)
fuse_conn_put(&cc->fc);
return rc;
}
static int cuse_release(struct inode *inode, struct file *file)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
fuse_sync_release(ff, file->f_flags);
fuse_conn_put(fc);
return 0;
}
static long cuse_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fuse_file *ff = file->private_data;
struct cuse_conn *cc = fc_to_cc(ff->fc);
unsigned int flags = 0;
if (cc->unrestricted_ioctl)
flags |= FUSE_IOCTL_UNRESTRICTED;
return fuse_do_ioctl(file, cmd, arg, flags);
}
static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fuse_file *ff = file->private_data;
struct cuse_conn *cc = fc_to_cc(ff->fc);
unsigned int flags = FUSE_IOCTL_COMPAT;
if (cc->unrestricted_ioctl)
flags |= FUSE_IOCTL_UNRESTRICTED;
return fuse_do_ioctl(file, cmd, arg, flags);
}
static const struct file_operations cuse_frontend_fops = {
.owner = THIS_MODULE,
.read = cuse_read,
.write = cuse_write,
.open = cuse_open,
.release = cuse_release,
.unlocked_ioctl = cuse_file_ioctl,
.compat_ioctl = cuse_file_compat_ioctl,
.poll = fuse_file_poll,
};
/**************************************************************************
* CUSE channel initialization and destruction
*/
struct cuse_devinfo {
const char *name;
};
/**
* cuse_parse_one - parse one key=value pair
* @pp: i/o parameter for the current position
* @end: points to one past the end of the packed string
* @keyp: out parameter for key
* @valp: out parameter for value
*
* *@pp points to packed strings - "key0=val0\0key1=val1\0" which ends
* at @end - 1. This function parses one pair and sets *@keyp to the
* start of the key and *@valp to the start of the value. Note that
* the original string is modified such that the key string is
* terminated with '\0'. *@pp is updated to point to the next string.
*
* RETURNS:
* 1 on successful parse, 0 on EOF, -errno on failure.
*/
static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
{
char *p = *pp;
char *key, *val;
while (p < end && *p == '\0')
p++;
if (p == end)
return 0;
if (end[-1] != '\0') {
printk(KERN_ERR "CUSE: info not properly terminated\n");
return -EINVAL;
}
key = val = p;
p += strlen(p);
if (valp) {
strsep(&val, "=");
if (!val)
val = key + strlen(key);
key = strstrip(key);
val = strstrip(val);
} else
key = strstrip(key);
if (!strlen(key)) {
printk(KERN_ERR "CUSE: zero length info key specified\n");
return -EINVAL;
}
*pp = p;
*keyp = key;
if (valp)
*valp = val;
return 1;
}
/**
* cuse_parse_dev_info - parse device info
* @p: device info string
* @len: length of device info string
* @devinfo: out parameter for parsed device info
*
* Parse @p to extract device info and store it into @devinfo. The string
* pointed to by @p is modified by parsing, and @devinfo points into
* it, so @p shouldn't be freed while @devinfo is in use.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
{
char *end = p + len;
char *key, *val;
int rc;
while (true) {
rc = cuse_parse_one(&p, end, &key, &val);
if (rc < 0)
return rc;
if (!rc)
break;
if (strcmp(key, "DEVNAME") == 0)
devinfo->name = val;
else
printk(KERN_WARNING "CUSE: unknown device info \"%s\"\n",
key);
}
if (!devinfo->name || !strlen(devinfo->name)) {
printk(KERN_ERR "CUSE: DEVNAME unspecified\n");
return -EINVAL;
}
return 0;
}
static void cuse_gendev_release(struct device *dev)
{
kfree(dev);
}
/**
* cuse_process_init_reply - finish initializing CUSE channel
*
* This function creates the character device and sets up all the
* required data structures for it. Please read the comment at the
* top of this file for high level overview.
*/
static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
struct cuse_conn *cc = fc_to_cc(fc);
struct cuse_init_out *arg = &req->misc.cuse_init_out;
struct page *page = req->pages[0];
struct cuse_devinfo devinfo = { };
struct device *dev;
struct cdev *cdev;
dev_t devt;
int rc;
if (req->out.h.error ||
arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
goto err;
}
fc->minor = arg->minor;
fc->max_read = max_t(unsigned, arg->max_read, 4096);
fc->max_write = max_t(unsigned, arg->max_write, 4096);
/* parse init reply */
cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL;
rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size,
&devinfo);
if (rc)
goto err;
/* determine and reserve devt */
devt = MKDEV(arg->dev_major, arg->dev_minor);
if (!MAJOR(devt))
rc = alloc_chrdev_region(&devt, MINOR(devt), 1, devinfo.name);
else
rc = register_chrdev_region(devt, 1, devinfo.name);
if (rc) {
printk(KERN_ERR "CUSE: failed to register chrdev region\n");
goto err;
}
/* devt determined, create device */
rc = -ENOMEM;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto err_region;
device_initialize(dev);
dev_set_uevent_suppress(dev, 1);
dev->class = cuse_class;
dev->devt = devt;
dev->release = cuse_gendev_release;
dev_set_drvdata(dev, cc);
dev_set_name(dev, "%s", devinfo.name);
rc = device_add(dev);
if (rc)
goto err_device;
/* register cdev */
rc = -ENOMEM;
cdev = cdev_alloc();
if (!cdev)
goto err_device;
cdev->owner = THIS_MODULE;
cdev->ops = &cuse_frontend_fops;
rc = cdev_add(cdev, devt, 1);
if (rc)
goto err_cdev;
cc->dev = dev;
cc->cdev = cdev;
/* make the device available */
spin_lock(&cuse_lock);
list_add(&cc->list, cuse_conntbl_head(devt));
spin_unlock(&cuse_lock);
/* announce device availability */
dev_set_uevent_suppress(dev, 0);
kobject_uevent(&dev->kobj, KOBJ_ADD);
out:
__free_page(page);
return;
err_cdev:
cdev_del(cdev);
err_device:
put_device(dev);
err_region:
unregister_chrdev_region(devt, 1);
err:
fc->conn_error = 1;
goto out;
}
static int cuse_send_init(struct cuse_conn *cc)
{
int rc;
struct fuse_req *req;
struct page *page;
struct fuse_conn *fc = &cc->fc;
struct cuse_init_in *arg;
BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE);
req = fuse_get_req(fc);
if (IS_ERR(req)) {
rc = PTR_ERR(req);
goto err;
}
rc = -ENOMEM;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
goto err_put_req;
arg = &req->misc.cuse_init_in;
arg->major = FUSE_KERNEL_VERSION;
arg->minor = FUSE_KERNEL_MINOR_VERSION;
arg->flags |= CUSE_UNRESTRICTED_IOCTL;
req->in.h.opcode = CUSE_INIT;
req->in.numargs = 1;
req->in.args[0].size = sizeof(struct cuse_init_in);
req->in.args[0].value = arg;
req->out.numargs = 2;
req->out.args[0].size = sizeof(struct cuse_init_out);
req->out.args[0].value = &req->misc.cuse_init_out;
req->out.args[1].size = CUSE_INIT_INFO_MAX;
req->out.argvar = 1;
req->out.argpages = 1;
req->pages[0] = page;
req->num_pages = 1;
req->end = cuse_process_init_reply;
fuse_request_send_background(fc, req);
return 0;
err_put_req:
fuse_put_request(fc, req);
err:
return rc;
}
static void cuse_fc_release(struct fuse_conn *fc)
{
struct cuse_conn *cc = fc_to_cc(fc);
kfree(cc);
}
/**
* cuse_channel_open - open method for /dev/cuse
* @inode: inode for /dev/cuse
* @file: file struct being opened
*
* A userland CUSE server can create a CUSE device by opening /dev/cuse
* and replying to the initialization request the kernel sends. This
* function is responsible for handling CUSE device initialization.
* Because the fd opened by this function is used during
* initialization, this function only creates cuse_conn and sends
* init. The rest is done when the init reply is processed.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int cuse_channel_open(struct inode *inode, struct file *file)
{
struct cuse_conn *cc;
int rc;
/* set up cuse_conn */
cc = kzalloc(sizeof(*cc), GFP_KERNEL);
if (!cc)
return -ENOMEM;
fuse_conn_init(&cc->fc);
INIT_LIST_HEAD(&cc->list);
cc->fc.release = cuse_fc_release;
cc->fc.connected = 1;
cc->fc.blocked = 0;
rc = cuse_send_init(cc);
if (rc) {
fuse_conn_put(&cc->fc);
return rc;
}
file->private_data = &cc->fc; /* channel owns base reference to cc */
return 0;
}
/**
* cuse_channel_release - release method for /dev/cuse
* @inode: inode for /dev/cuse
* @file: file struct being closed
*
* Disconnect the channel, deregister the CUSE device and initiate
* destruction by putting the default reference.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static int cuse_channel_release(struct inode *inode, struct file *file)
{
struct cuse_conn *cc = fc_to_cc(file->private_data);
int rc;
/* remove from the conntbl, no more access from this point on */
spin_lock(&cuse_lock);
list_del_init(&cc->list);
spin_unlock(&cuse_lock);
/* remove device */
if (cc->dev)
device_unregister(cc->dev);
if (cc->cdev) {
unregister_chrdev_region(cc->cdev->dev, 1);
cdev_del(cc->cdev);
}
/* kill connection and shutdown channel */
fuse_conn_kill(&cc->fc);
rc = fuse_dev_release(inode, file); /* puts the base reference */
return rc;
}
static struct file_operations cuse_channel_fops; /* initialized during init */
/**************************************************************************
* Misc stuff and module initialization
*
* CUSE exports the same set of attributes to sysfs as fusectl.
*/
static ssize_t cuse_class_waiting_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cuse_conn *cc = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting));
}
static ssize_t cuse_class_abort_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct cuse_conn *cc = dev_get_drvdata(dev);
fuse_abort_conn(&cc->fc);
return count;
}
static struct device_attribute cuse_class_dev_attrs[] = {
__ATTR(waiting, S_IFREG | 0400, cuse_class_waiting_show, NULL),
__ATTR(abort, S_IFREG | 0200, NULL, cuse_class_abort_store),
{ }
};
static struct miscdevice cuse_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "cuse",
.fops = &cuse_channel_fops,
};
static int __init cuse_init(void)
{
int i, rc;
/* init conntbl */
for (i = 0; i < CUSE_CONNTBL_LEN; i++)
INIT_LIST_HEAD(&cuse_conntbl[i]);
/* inherit and extend fuse_dev_operations */
cuse_channel_fops = fuse_dev_operations;
cuse_channel_fops.owner = THIS_MODULE;
cuse_channel_fops.open = cuse_channel_open;
cuse_channel_fops.release = cuse_channel_release;
cuse_class = class_create(THIS_MODULE, "cuse");
if (IS_ERR(cuse_class))
return PTR_ERR(cuse_class);
cuse_class->dev_attrs = cuse_class_dev_attrs;
rc = misc_register(&cuse_miscdev);
if (rc) {
class_destroy(cuse_class);
return rc;
}
return 0;
}
static void __exit cuse_exit(void)
{
misc_deregister(&cuse_miscdev);
class_destroy(cuse_class);
}
module_init(cuse_init);
module_exit(cuse_exit);
MODULE_AUTHOR("Tejun Heo <tj@kernel.org>");
MODULE_DESCRIPTION("Character device in Userspace");
MODULE_LICENSE("GPL");
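
The header comment at the top of this file describes the kernel half of the handshake: a userland server opens /dev/cuse, answers the CUSE_INIT request, and a character device appears. For orientation only (not part of this commit), a minimal server sketch using libfuse's CUSE lowlevel API (cuse_lowlevel.h), loosely modelled on its cusexmp example, might look as follows; the device name "hello", the 64 KiB buffer and the callback bodies are arbitrary assumptions, and details can differ between libfuse versions.

#define FUSE_USE_VERSION 29
#include <cuse_lowlevel.h>
#include <string.h>

static char dev_buf[65536];		/* trivial backing store for the device */
static size_t dev_len;

static void hello_open(fuse_req_t req, struct fuse_file_info *fi)
{
	fuse_reply_open(req, fi);		/* accept every open */
}

static void hello_read(fuse_req_t req, size_t size, off_t off,
		       struct fuse_file_info *fi)
{
	if (off >= (off_t)dev_len)
		size = 0;
	else if (off + size > dev_len)
		size = dev_len - off;
	fuse_reply_buf(req, dev_buf + off, size);	/* answers FUSE_READ */
}

static void hello_write(fuse_req_t req, const char *buf, size_t size,
			off_t off, struct fuse_file_info *fi)
{
	if (off >= (off_t)sizeof(dev_buf)) {
		fuse_reply_write(req, 0);
		return;
	}
	if (off + size > sizeof(dev_buf))
		size = sizeof(dev_buf) - off;
	memcpy(dev_buf + off, buf, size);
	if (off + size > dev_len)
		dev_len = off + size;
	fuse_reply_write(req, size);		/* answers FUSE_WRITE */
}

static const struct cuse_lowlevel_ops hello_clop = {
	.open	= hello_open,
	.read	= hello_read,
	.write	= hello_write,
};

int main(int argc, char **argv)
{
	/* "DEVNAME=..." ends up in the packed info string that
	 * cuse_parse_devinfo() above parses; /dev/hello is created. */
	const char *dev_info_argv[] = { "DEVNAME=hello" };
	struct cuse_info ci;

	memset(&ci, 0, sizeof(ci));
	ci.dev_info_argc = 1;
	ci.dev_info_argv = dev_info_argv;
	/* dev_major/dev_minor left 0: the kernel allocates a chrdev region */

	return cuse_lowlevel_main(argc, argv, &ci, &hello_clop, NULL);
}

Once such a server is running, applications open /dev/hello and use plain read(), write() and ioctl() as on any character device; on the kernel side the cuse_read()/cuse_write() frontends above forward those calls through fuse_direct_io() with the file position forced to 0.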

fs/fuse/dev.c

@ -46,6 +46,7 @@ struct fuse_req *fuse_request_alloc(void)
fuse_request_init(req);
return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);
struct fuse_req *fuse_request_alloc_nofs(void)
{
@ -124,6 +125,7 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc)
atomic_dec(&fc->num_waiting);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);
/*
* Return request in fuse_file->reserved_req. However that may
@ -208,6 +210,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
fuse_request_free(req);
}
}
EXPORT_SYMBOL_GPL(fuse_put_request);
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
@ -282,7 +285,7 @@ __releases(&fc->lock)
wake_up_all(&fc->blocked_waitq);
}
if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
fc->connected) {
fc->connected && fc->bdi_initialized) {
clear_bdi_congested(&fc->bdi, READ);
clear_bdi_congested(&fc->bdi, WRITE);
}
@ -400,6 +403,7 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
}
spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);
static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
struct fuse_req *req)
@ -408,7 +412,8 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
fc->num_background++;
if (fc->num_background == FUSE_MAX_BACKGROUND)
fc->blocked = 1;
if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
fc->bdi_initialized) {
set_bdi_congested(&fc->bdi, READ);
set_bdi_congested(&fc->bdi, WRITE);
}
@ -439,6 +444,7 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
req->isreply = 1;
fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);
/*
* Called under fc->lock
@ -1105,8 +1111,9 @@ void fuse_abort_conn(struct fuse_conn *fc)
}
spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
static int fuse_dev_release(struct inode *inode, struct file *file)
int fuse_dev_release(struct inode *inode, struct file *file)
{
struct fuse_conn *fc = fuse_get_conn(file);
if (fc) {
@ -1120,6 +1127,7 @@ static int fuse_dev_release(struct inode *inode, struct file *file)
return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
@ -1142,6 +1150,7 @@ const struct file_operations fuse_dev_operations = {
.release = fuse_dev_release,
.fasync = fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);
static struct miscdevice fuse_miscdevice = {
.minor = FUSE_MINOR,

fs/fuse/dir.c

@ -361,19 +361,6 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
return ERR_PTR(err);
}
/*
* Synchronous release for the case when something goes wrong in CREATE_OPEN
*/
static void fuse_sync_release(struct fuse_conn *fc, struct fuse_file *ff,
u64 nodeid, int flags)
{
fuse_release_fill(ff, nodeid, flags, FUSE_RELEASE);
ff->reserved_req->force = 1;
fuse_request_send(fc, ff->reserved_req);
fuse_put_request(fc, ff->reserved_req);
kfree(ff);
}
/*
* Atomic create+open operation
*
@ -445,12 +432,14 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
goto out_free_ff;
fuse_put_request(fc, req);
ff->fh = outopen.fh;
ff->nodeid = outentry.nodeid;
ff->open_flags = outopen.open_flags;
inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
&outentry.attr, entry_attr_timeout(&outentry), 0);
if (!inode) {
flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
ff->fh = outopen.fh;
fuse_sync_release(fc, ff, outentry.nodeid, flags);
fuse_sync_release(ff, flags);
fuse_send_forget(fc, forget_req, outentry.nodeid, 1);
return -ENOMEM;
}
@ -460,11 +449,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
fuse_invalidate_attr(dir);
file = lookup_instantiate_filp(nd, entry, generic_file_open);
if (IS_ERR(file)) {
ff->fh = outopen.fh;
fuse_sync_release(fc, ff, outentry.nodeid, flags);
fuse_sync_release(ff, flags);
return PTR_ERR(file);
}
fuse_finish_open(inode, file, ff, &outopen);
file->private_data = fuse_file_get(ff);
fuse_finish_open(inode, file);
return 0;
out_free_ff:
@ -1035,7 +1024,7 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir)
req->out.argpages = 1;
req->num_pages = 1;
req->pages[0] = page;
fuse_read_fill(req, file, inode, file->f_pos, PAGE_SIZE, FUSE_READDIR);
fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR);
fuse_request_send(fc, req);
nbytes = req->out.args[0].size;
err = req->out.h.error;
@ -1101,12 +1090,14 @@ static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
static int fuse_dir_open(struct inode *inode, struct file *file)
{
return fuse_open_common(inode, file, 1);
return fuse_open_common(inode, file, true);
}
static int fuse_dir_release(struct inode *inode, struct file *file)
{
return fuse_release_common(inode, file, 1);
fuse_release_common(file, FUSE_RELEASEDIR);
return 0;
}
static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync)

fs/fuse/file.c

@ -12,13 +12,13 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
static const struct file_operations fuse_direct_io_file_operations;
static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
struct fuse_open_out *outargp)
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
int opcode, struct fuse_open_out *outargp)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_open_in inarg;
struct fuse_req *req;
int err;
@ -31,8 +31,8 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
if (!fc->atomic_o_trunc)
inarg.flags &= ~O_TRUNC;
req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
req->in.h.nodeid = get_node_id(inode);
req->in.h.opcode = opcode;
req->in.h.nodeid = nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
@ -49,22 +49,27 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
struct fuse_file *ff;
ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
if (ff) {
ff->reserved_req = fuse_request_alloc();
if (!ff->reserved_req) {
kfree(ff);
return NULL;
} else {
INIT_LIST_HEAD(&ff->write_entry);
atomic_set(&ff->count, 0);
spin_lock(&fc->lock);
ff->kh = ++fc->khctr;
spin_unlock(&fc->lock);
}
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
if (unlikely(!ff))
return NULL;
ff->fc = fc;
ff->reserved_req = fuse_request_alloc();
if (unlikely(!ff->reserved_req)) {
kfree(ff);
return NULL;
}
INIT_LIST_HEAD(&ff->write_entry);
atomic_set(&ff->count, 0);
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
spin_lock(&fc->lock);
ff->kh = ++fc->khctr;
spin_unlock(&fc->lock);
return ff;
}
@ -74,7 +79,7 @@ void fuse_file_free(struct fuse_file *ff)
kfree(ff);
}
static struct fuse_file *fuse_file_get(struct fuse_file *ff)
struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
atomic_inc(&ff->count);
return ff;
@ -82,41 +87,66 @@ static struct fuse_file *fuse_file_get(struct fuse_file *ff)
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
dput(req->misc.release.dentry);
mntput(req->misc.release.vfsmount);
path_put(&req->misc.release.path);
}
static void fuse_file_put(struct fuse_file *ff)
{
if (atomic_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
struct inode *inode = req->misc.release.dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
req->end = fuse_release_end;
fuse_request_send_background(fc, req);
fuse_request_send_background(ff->fc, req);
kfree(ff);
}
}
void fuse_finish_open(struct inode *inode, struct file *file,
struct fuse_file *ff, struct fuse_open_out *outarg)
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
bool isdir)
{
if (outarg->open_flags & FOPEN_DIRECT_IO)
file->f_op = &fuse_direct_io_file_operations;
if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
invalidate_inode_pages2(inode->i_mapping);
if (outarg->open_flags & FOPEN_NONSEEKABLE)
nonseekable_open(inode, file);
ff->fh = outarg->fh;
file->private_data = fuse_file_get(ff);
}
int fuse_open_common(struct inode *inode, struct file *file, int isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_open_out outarg;
struct fuse_file *ff;
int err;
int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
ff = fuse_file_alloc(fc);
if (!ff)
return -ENOMEM;
err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
if (err) {
fuse_file_free(ff);
return err;
}
if (isdir)
outarg.open_flags &= ~FOPEN_DIRECT_IO;
ff->fh = outarg.fh;
ff->nodeid = nodeid;
ff->open_flags = outarg.open_flags;
file->private_data = fuse_file_get(ff);
return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);
void fuse_finish_open(struct inode *inode, struct file *file)
{
struct fuse_file *ff = file->private_data;
if (ff->open_flags & FOPEN_DIRECT_IO)
file->f_op = &fuse_direct_io_file_operations;
if (!(ff->open_flags & FOPEN_KEEP_CACHE))
invalidate_inode_pages2(inode->i_mapping);
if (ff->open_flags & FOPEN_NONSEEKABLE)
nonseekable_open(inode, file);
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
/* VFS checks this, but only _after_ ->open() */
if (file->f_flags & O_DIRECT)
@ -126,79 +156,86 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
if (err)
return err;
ff = fuse_file_alloc(fc);
if (!ff)
return -ENOMEM;
err = fuse_send_open(inode, file, isdir, &outarg);
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
if (err)
fuse_file_free(ff);
else {
if (isdir)
outarg.open_flags &= ~FOPEN_DIRECT_IO;
fuse_finish_open(inode, file, ff, &outarg);
}
return err;
return err;
fuse_finish_open(inode, file);
return 0;
}
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
struct fuse_conn *fc = ff->fc;
struct fuse_req *req = ff->reserved_req;
struct fuse_release_in *inarg = &req->misc.release.in;
spin_lock(&fc->lock);
list_del(&ff->write_entry);
if (!RB_EMPTY_NODE(&ff->polled_node))
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
wake_up_interruptible_sync(&ff->poll_wait);
inarg->fh = ff->fh;
inarg->flags = flags;
req->in.h.opcode = opcode;
req->in.h.nodeid = nodeid;
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(struct fuse_release_in);
req->in.args[0].value = inarg;
}
int fuse_release_common(struct inode *inode, struct file *file, int isdir)
void fuse_release_common(struct file *file, int opcode)
{
struct fuse_file *ff = file->private_data;
if (ff) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req = ff->reserved_req;
struct fuse_file *ff;
struct fuse_req *req;
fuse_release_fill(ff, get_node_id(inode), file->f_flags,
isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
ff = file->private_data;
if (unlikely(!ff))
return;
/* Hold vfsmount and dentry until release is finished */
req->misc.release.vfsmount = mntget(file->f_path.mnt);
req->misc.release.dentry = dget(file->f_path.dentry);
req = ff->reserved_req;
fuse_prepare_release(ff, file->f_flags, opcode);
spin_lock(&fc->lock);
list_del(&ff->write_entry);
if (!RB_EMPTY_NODE(&ff->polled_node))
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
/* Hold vfsmount and dentry until release is finished */
path_get(&file->f_path);
req->misc.release.path = file->f_path;
wake_up_interruptible_sync(&ff->poll_wait);
/*
* Normally this will send the RELEASE request,
* however if some asynchronous READ or WRITE requests
* are outstanding, the sending will be delayed
*/
fuse_file_put(ff);
}
/* Return value is ignored by VFS */
return 0;
/*
* Normally this will send the RELEASE request, however if
* some asynchronous READ or WRITE requests are outstanding,
* the sending will be delayed.
*/
fuse_file_put(ff);
}
static int fuse_open(struct inode *inode, struct file *file)
{
return fuse_open_common(inode, file, 0);
return fuse_open_common(inode, file, false);
}
static int fuse_release(struct inode *inode, struct file *file)
{
return fuse_release_common(inode, file, 0);
fuse_release_common(file, FUSE_RELEASE);
/* return value is ignored by VFS */
return 0;
}
void fuse_sync_release(struct fuse_file *ff, int flags)
{
WARN_ON(atomic_read(&ff->count) > 1);
fuse_prepare_release(ff, flags, FUSE_RELEASE);
ff->reserved_req->force = 1;
fuse_request_send(ff->fc, ff->reserved_req);
fuse_put_request(ff->fc, ff->reserved_req);
kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
* Scramble the ID space with XTEA, so that the value of the files_struct
* pointer is not exposed to userspace.
@ -371,8 +408,8 @@ static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
return fuse_fsync_common(file, de, datasync, 0);
}
void fuse_read_fill(struct fuse_req *req, struct file *file,
struct inode *inode, loff_t pos, size_t count, int opcode)
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
size_t count, int opcode)
{
struct fuse_read_in *inarg = &req->misc.read.in;
struct fuse_file *ff = file->private_data;
@ -382,7 +419,7 @@ void fuse_read_fill(struct fuse_req *req, struct file *file,
inarg->size = count;
inarg->flags = file->f_flags;
req->in.h.opcode = opcode;
req->in.h.nodeid = get_node_id(inode);
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(struct fuse_read_in);
req->in.args[0].value = inarg;
@ -392,12 +429,12 @@ void fuse_read_fill(struct fuse_req *req, struct file *file,
}
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
struct inode *inode, loff_t pos, size_t count,
fl_owner_t owner)
loff_t pos, size_t count, fl_owner_t owner)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
fuse_read_fill(req, file, pos, count, FUSE_READ);
if (owner != NULL) {
struct fuse_read_in *inarg = &req->misc.read.in;
@ -455,7 +492,7 @@ static int fuse_readpage(struct file *file, struct page *page)
req->out.argpages = 1;
req->num_pages = 1;
req->pages[0] = page;
num_read = fuse_send_read(req, file, inode, pos, count, NULL);
num_read = fuse_send_read(req, file, pos, count, NULL);
err = req->out.h.error;
fuse_put_request(fc, req);
@ -504,19 +541,18 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
fuse_file_put(req->ff);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file,
struct inode *inode)
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
loff_t pos = page_offset(req->pages[0]);
size_t count = req->num_pages << PAGE_CACHE_SHIFT;
req->out.argpages = 1;
req->out.page_zeroing = 1;
fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
fuse_read_fill(req, file, pos, count, FUSE_READ);
req->misc.read.attr_ver = fuse_get_attr_version(fc);
if (fc->async_read) {
struct fuse_file *ff = file->private_data;
req->ff = fuse_file_get(ff);
req->end = fuse_readpages_end;
fuse_request_send_background(fc, req);
@ -546,7 +582,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
(req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
(req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
req->pages[req->num_pages - 1]->index + 1 != page->index)) {
fuse_send_readpages(req, data->file, inode);
fuse_send_readpages(req, data->file);
data->req = req = fuse_get_req(fc);
if (IS_ERR(req)) {
unlock_page(page);
@ -580,7 +616,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping,
err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
if (!err) {
if (data.req->num_pages)
fuse_send_readpages(data.req, file, inode);
fuse_send_readpages(data.req, file);
else
fuse_put_request(fc, data.req);
}
@ -607,24 +643,19 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
static void fuse_write_fill(struct fuse_req *req, struct file *file,
struct fuse_file *ff, struct inode *inode,
loff_t pos, size_t count, int writepage)
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
loff_t pos, size_t count)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_write_in *inarg = &req->misc.write.in;
struct fuse_write_out *outarg = &req->misc.write.out;
memset(inarg, 0, sizeof(struct fuse_write_in));
inarg->fh = ff->fh;
inarg->offset = pos;
inarg->size = count;
inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
inarg->flags = file ? file->f_flags : 0;
req->in.h.opcode = FUSE_WRITE;
req->in.h.nodeid = get_node_id(inode);
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 2;
if (fc->minor < 9)
if (ff->fc->minor < 9)
req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
else
req->in.args[0].size = sizeof(struct fuse_write_in);
@ -636,13 +667,15 @@ static void fuse_write_fill(struct fuse_req *req, struct file *file,
}
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
struct inode *inode, loff_t pos, size_t count,
fl_owner_t owner)
loff_t pos, size_t count, fl_owner_t owner)
{
struct fuse_conn *fc = get_fuse_conn(inode);
fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
struct fuse_write_in *inarg = &req->misc.write.in;
fuse_write_fill(req, ff, pos, count);
inarg->flags = file->f_flags;
if (owner != NULL) {
struct fuse_write_in *inarg = &req->misc.write.in;
inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
inarg->lock_owner = fuse_lock_owner_id(fc, owner);
}
@ -700,7 +733,7 @@ static int fuse_buffered_write(struct file *file, struct inode *inode,
req->num_pages = 1;
req->pages[0] = page;
req->page_offset = offset;
nres = fuse_send_write(req, file, inode, pos, count, NULL);
nres = fuse_send_write(req, file, pos, count, NULL);
err = req->out.h.error;
fuse_put_request(fc, req);
if (!err && !nres)
@ -741,7 +774,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
for (i = 0; i < req->num_pages; i++)
fuse_wait_on_page_writeback(inode, req->pages[i]->index);
res = fuse_send_write(req, file, inode, pos, count, NULL);
res = fuse_send_write(req, file, pos, count, NULL);
offset = req->page_offset;
count = res;
@ -979,25 +1012,23 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
return 0;
}
static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
size_t count, loff_t *ppos, int write)
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
size_t count, loff_t *ppos, int write)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
size_t nmax = write ? fc->max_write : fc->max_read;
loff_t pos = *ppos;
ssize_t res = 0;
struct fuse_req *req;
if (is_bad_inode(inode))
return -EIO;
req = fuse_get_req(fc);
if (IS_ERR(req))
return PTR_ERR(req);
while (count) {
size_t nres;
fl_owner_t owner = current->files;
size_t nbytes = min(count, nmax);
int err = fuse_get_user_pages(req, buf, &nbytes, write);
if (err) {
@ -1006,11 +1037,10 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
}
if (write)
nres = fuse_send_write(req, file, inode, pos, nbytes,
current->files);
nres = fuse_send_write(req, file, pos, nbytes, owner);
else
nres = fuse_send_read(req, file, inode, pos, nbytes,
current->files);
nres = fuse_send_read(req, file, pos, nbytes, owner);
fuse_release_user_pages(req, !write);
if (req->out.h.error) {
if (!res)
@ -1034,20 +1064,27 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
}
}
fuse_put_request(fc, req);
if (res > 0) {
if (write)
fuse_write_update_size(inode, pos);
if (res > 0)
*ppos = pos;
}
fuse_invalidate_attr(inode);
return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return fuse_direct_io(file, buf, count, ppos, 0);
ssize_t res;
struct inode *inode = file->f_path.dentry->d_inode;
if (is_bad_inode(inode))
return -EIO;
res = fuse_direct_io(file, buf, count, ppos, 0);
fuse_invalidate_attr(inode);
return res;
}
static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
@ -1055,12 +1092,22 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
{
struct inode *inode = file->f_path.dentry->d_inode;
ssize_t res;
if (is_bad_inode(inode))
return -EIO;
/* Don't allow parallel writes to the same file */
mutex_lock(&inode->i_mutex);
res = generic_write_checks(file, ppos, &count, 0);
if (!res)
if (!res) {
res = fuse_direct_io(file, buf, count, ppos, 1);
if (res > 0)
fuse_write_update_size(inode, *ppos);
}
mutex_unlock(&inode->i_mutex);
fuse_invalidate_attr(inode);
return res;
}
@ -1177,9 +1224,10 @@ static int fuse_writepage_locked(struct page *page)
req->ff = fuse_file_get(ff);
spin_unlock(&fc->lock);
fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);
fuse_write_fill(req, ff, page_offset(page), 0);
copy_highpage(tmp_page, page);
req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
req->in.argpages = 1;
req->num_pages = 1;
req->pages[0] = tmp_page;
@ -1603,12 +1651,11 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
* limits ioctl data transfers to well-formed ioctls and is the forced
* behavior for all FUSE servers.
*/
static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, unsigned int flags)
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
unsigned int flags)
{
struct inode *inode = file->f_dentry->d_inode;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_conn *fc = ff->fc;
struct fuse_ioctl_in inarg = {
.fh = ff->fh,
.cmd = cmd,
@ -1627,13 +1674,6 @@ static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
/* assume all the iovs returned by client always fits in a page */
BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
if (!fuse_allow_task(fc, current))
return -EACCES;
err = -EIO;
if (is_bad_inode(inode))
goto out;
err = -ENOMEM;
pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL);
iov_page = alloc_page(GFP_KERNEL);
@ -1694,7 +1734,7 @@ static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
/* okay, let's send it to the client */
req->in.h.opcode = FUSE_IOCTL;
req->in.h.nodeid = get_node_id(inode);
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
@ -1777,17 +1817,33 @@ static long fuse_file_do_ioctl(struct file *file, unsigned int cmd,
return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
unsigned long arg, unsigned int flags)
{
struct inode *inode = file->f_dentry->d_inode;
struct fuse_conn *fc = get_fuse_conn(inode);
if (!fuse_allow_task(fc, current))
return -EACCES;
if (is_bad_inode(inode))
return -EIO;
return fuse_do_ioctl(file, cmd, arg, flags);
}
static long fuse_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return fuse_file_do_ioctl(file, cmd, arg, 0);
return fuse_file_ioctl_common(file, cmd, arg, 0);
}
static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT);
return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
/*
@ -1841,11 +1897,10 @@ static void fuse_register_polled_file(struct fuse_conn *fc,
spin_unlock(&fc->lock);
}
static unsigned fuse_file_poll(struct file *file, poll_table *wait)
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
struct inode *inode = file->f_dentry->d_inode;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_conn *fc = ff->fc;
struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
struct fuse_poll_out outarg;
struct fuse_req *req;
@ -1870,7 +1925,7 @@ static unsigned fuse_file_poll(struct file *file, poll_table *wait)
return PTR_ERR(req);
req->in.h.opcode = FUSE_POLL;
req->in.h.nodeid = get_node_id(inode);
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
@ -1889,6 +1944,7 @@ static unsigned fuse_file_poll(struct file *file, poll_table *wait)
}
return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
/*
* This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and

fs/fuse/fuse_i.h

@ -97,8 +97,13 @@ struct fuse_inode {
struct list_head writepages;
};
struct fuse_conn;
/** FUSE specific file data */
struct fuse_file {
/** Fuse connection for this file */
struct fuse_conn *fc;
/** Request reserved for flush and release */
struct fuse_req *reserved_req;
@ -108,9 +113,15 @@ struct fuse_file {
/** File handle used by userspace */
u64 fh;
/** Node id of this file */
u64 nodeid;
/** Refcount */
atomic_t count;
/** FOPEN_* flags returned by open */
u32 open_flags;
/** Entry on inode's write_files list */
struct list_head write_entry;
@ -185,8 +196,6 @@ enum fuse_req_state {
FUSE_REQ_FINISHED
};
struct fuse_conn;
/**
* A request to the client
*/
@ -248,11 +257,12 @@ struct fuse_req {
struct fuse_forget_in forget_in;
struct {
struct fuse_release_in in;
struct vfsmount *vfsmount;
struct dentry *dentry;
struct path path;
} release;
struct fuse_init_in init_in;
struct fuse_init_out init_out;
struct cuse_init_in cuse_init_in;
struct cuse_init_out cuse_init_out;
struct {
struct fuse_read_in in;
u64 attr_ver;
@ -386,6 +396,9 @@ struct fuse_conn {
/** Filesystem supports NFS exporting. Only set in INIT */
unsigned export_support:1;
/** Set if bdi is valid */
unsigned bdi_initialized:1;
/*
* The following bitfields are only for optimization purposes
* and hence races in setting them will not cause malfunction
@ -515,25 +528,24 @@ void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
* Initialize READ or READDIR request
*/
void fuse_read_fill(struct fuse_req *req, struct file *file,
struct inode *inode, loff_t pos, size_t count, int opcode);
loff_t pos, size_t count, int opcode);
/**
* Send OPEN or OPENDIR request
*/
int fuse_open_common(struct inode *inode, struct file *file, int isdir);
int fuse_open_common(struct inode *inode, struct file *file, bool isdir);
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc);
struct fuse_file *fuse_file_get(struct fuse_file *ff);
void fuse_file_free(struct fuse_file *ff);
void fuse_finish_open(struct inode *inode, struct file *file,
struct fuse_file *ff, struct fuse_open_out *outarg);
void fuse_finish_open(struct inode *inode, struct file *file);
/** Fill in ff->reserved_req with a RELEASE request */
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode);
void fuse_sync_release(struct fuse_file *ff, int flags);
/**
* Send RELEASE or RELEASEDIR request
*/
int fuse_release_common(struct inode *inode, struct file *file, int isdir);
void fuse_release_common(struct file *file, int opcode);
/**
* Send FSYNC or FSYNCDIR request
@ -652,10 +664,12 @@ void fuse_invalidate_entry_cache(struct dentry *entry);
*/
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc);
void fuse_conn_kill(struct fuse_conn *fc);
/**
* Initialize fuse_conn
*/
int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb);
void fuse_conn_init(struct fuse_conn *fc);
/**
* Release reference to fuse_conn
@ -694,4 +708,13 @@ void fuse_release_nowrite(struct inode *inode);
u64 fuse_get_attr_version(struct fuse_conn *fc);
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
bool isdir);
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
size_t count, loff_t *ppos, int write);
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
unsigned int flags);
unsigned fuse_file_poll(struct file *file, poll_table *wait);
int fuse_dev_release(struct inode *inode, struct file *file);
#endif /* _FS_FUSE_I_H */

fs/fuse/inode.c

@ -277,11 +277,14 @@ static void fuse_send_destroy(struct fuse_conn *fc)
}
}
static void fuse_put_super(struct super_block *sb)
static void fuse_bdi_destroy(struct fuse_conn *fc)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
if (fc->bdi_initialized)
bdi_destroy(&fc->bdi);
}
fuse_send_destroy(fc);
void fuse_conn_kill(struct fuse_conn *fc)
{
spin_lock(&fc->lock);
fc->connected = 0;
fc->blocked = 0;
@ -295,7 +298,16 @@ static void fuse_put_super(struct super_block *sb)
list_del(&fc->entry);
fuse_ctl_remove_conn(fc);
mutex_unlock(&fuse_mutex);
bdi_destroy(&fc->bdi);
fuse_bdi_destroy(fc);
}
EXPORT_SYMBOL_GPL(fuse_conn_kill);
static void fuse_put_super(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
fuse_send_destroy(fc);
fuse_conn_kill(fc);
fuse_conn_put(fc);
}
@ -466,10 +478,8 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
return 0;
}
int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb)
void fuse_conn_init(struct fuse_conn *fc)
{
int err;
memset(fc, 0, sizeof(*fc));
spin_lock_init(&fc->lock);
mutex_init(&fc->inst_mutex);
@ -484,49 +494,12 @@ int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb)
INIT_LIST_HEAD(&fc->bg_queue);
INIT_LIST_HEAD(&fc->entry);
atomic_set(&fc->num_waiting, 0);
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does its own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
fc->khctr = 0;
fc->polled_files = RB_ROOT;
fc->dev = sb->s_dev;
err = bdi_init(&fc->bdi);
if (err)
goto error_mutex_destroy;
if (sb->s_bdev) {
err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
MAJOR(fc->dev), MINOR(fc->dev));
} else {
err = bdi_register_dev(&fc->bdi, fc->dev);
}
if (err)
goto error_bdi_destroy;
/*
* For a single fuse filesystem use max 1% of dirty +
* writeback threshold.
*
* This gives about 1M of write buffer for memory maps on a
* machine with 1G and 10% dirty_ratio, which should be more
* than enough.
*
* Privileged users can raise it by writing to
*
* /sys/class/bdi/<bdi>/max_ratio
*/
bdi_set_max_ratio(&fc->bdi, 1);
fc->reqctr = 0;
fc->blocked = 1;
fc->attr_version = 1;
get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
return 0;
error_bdi_destroy:
bdi_destroy(&fc->bdi);
error_mutex_destroy:
mutex_destroy(&fc->inst_mutex);
return err;
}
EXPORT_SYMBOL_GPL(fuse_conn_init);
@ -539,12 +512,14 @@ void fuse_conn_put(struct fuse_conn *fc)
fc->release(fc);
}
}
EXPORT_SYMBOL_GPL(fuse_conn_put);
struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
{
atomic_inc(&fc->count);
return fc;
}
EXPORT_SYMBOL_GPL(fuse_conn_get);
static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode)
{
@ -797,6 +772,48 @@ static void fuse_free_conn(struct fuse_conn *fc)
kfree(fc);
}
static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
{
int err;
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
/* fuse does its own writeback accounting */
fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
err = bdi_init(&fc->bdi);
if (err)
return err;
fc->bdi_initialized = 1;
if (sb->s_bdev) {
err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
MAJOR(fc->dev), MINOR(fc->dev));
} else {
err = bdi_register_dev(&fc->bdi, fc->dev);
}
if (err)
return err;
/*
* For a single fuse filesystem use max 1% of dirty +
* writeback threshold.
*
* This gives about 1M of write buffer for memory maps on a
* machine with 1G and 10% dirty_ratio, which should be more
* than enough.
*
* Privileged users can raise it by writing to
*
* /sys/class/bdi/<bdi>/max_ratio
*/
bdi_set_max_ratio(&fc->bdi, 1);
return 0;
}
static int fuse_fill_super(struct super_block *sb, void *data, int silent)
{
struct fuse_conn *fc;
@ -843,11 +860,12 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (!fc)
goto err_fput;
err = fuse_conn_init(fc, sb);
if (err) {
kfree(fc);
goto err_fput;
}
fuse_conn_init(fc);
fc->dev = sb->s_dev;
err = fuse_bdi_init(fc, sb);
if (err)
goto err_put_conn;
fc->release = fuse_free_conn;
fc->flags = d.flags;
@ -911,7 +929,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
err_put_root:
dput(root_dentry);
err_put_conn:
bdi_destroy(&fc->bdi);
fuse_bdi_destroy(fc);
fuse_conn_put(fc);
err_fput:
fput(file);

include/linux/fuse.h

@ -120,6 +120,13 @@ struct fuse_file_lock {
#define FUSE_EXPORT_SUPPORT (1 << 4)
#define FUSE_BIG_WRITES (1 << 5)
/**
* CUSE INIT request/reply flags
*
* CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl
*/
#define CUSE_UNRESTRICTED_IOCTL (1 << 0)
/**
* Release flags
*/
@ -210,6 +217,9 @@ enum fuse_opcode {
FUSE_DESTROY = 38,
FUSE_IOCTL = 39,
FUSE_POLL = 40,
/* CUSE specific operations */
CUSE_INIT = 4096,
};
enum fuse_notify_code {
@ -401,6 +411,27 @@ struct fuse_init_out {
__u32 max_write;
};
#define CUSE_INIT_INFO_MAX 4096
struct cuse_init_in {
__u32 major;
__u32 minor;
__u32 unused;
__u32 flags;
};
struct cuse_init_out {
__u32 major;
__u32 minor;
__u32 unused;
__u32 flags;
__u32 max_read;
__u32 max_write;
__u32 dev_major; /* chardev major */
__u32 dev_minor; /* chardev minor */
__u32 spare[10];
};
struct fuse_interrupt_in {
__u64 unique;
};
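
For completeness, an illustrative sketch (not part of this commit) of the reply a server would send for CUSE_INIT when talking to /dev/cuse directly, without libfuse: a fuse_out_header followed by the cuse_init_out above and the packed "KEY=value\0" device-info string that cuse_parse_devinfo() in fs/fuse/cuse.c expects. The file descriptor, the request's unique id and the device name "hello" are assumed inputs.

#include <linux/fuse.h>
#include <stdint.h>
#include <string.h>
#include <sys/uio.h>

static int reply_cuse_init(int cuse_fd, uint64_t unique)
{
	/* Packed info string; must be '\0'-terminated and fit within
	 * CUSE_INIT_INFO_MAX.  sizeof() includes the terminating NUL. */
	static const char info[] = "DEVNAME=hello";
	struct cuse_init_out outarg;
	struct fuse_out_header hdr;
	struct iovec iov[3];

	memset(&outarg, 0, sizeof(outarg));
	outarg.major	 = FUSE_KERNEL_VERSION;
	outarg.minor	 = FUSE_KERNEL_MINOR_VERSION;	/* must be >= 11 */
	outarg.flags	 = CUSE_UNRESTRICTED_IOCTL;
	outarg.max_read	 = 65536;
	outarg.max_write = 65536;
	outarg.dev_major = 0;	/* 0: kernel allocates a chrdev region */
	outarg.dev_minor = 0;

	memset(&hdr, 0, sizeof(hdr));
	hdr.unique = unique;	/* copied from the request's fuse_in_header */
	hdr.error  = 0;
	hdr.len	   = sizeof(hdr) + sizeof(outarg) + sizeof(info);

	iov[0].iov_base = &hdr;		 iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = &outarg;	 iov[1].iov_len = sizeof(outarg);
	iov[2].iov_base = (void *)info;	 iov[2].iov_len = sizeof(info);

	return writev(cuse_fd, iov, 3) == (ssize_t)hdr.len ? 0 : -1;
}

On success the kernel side (cuse_process_init_reply() above) registers the chrdev region, creates the class device, and the new character device becomes available to applications.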