Merge git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6:
  cifs: make cifs_set_oplock_level() take a cifsInodeInfo pointer
  cifs: dereferencing first then checking
  cifs: trivial comment fix: tlink_tree is now a rbtree
  [CIFS] Cleanup unused variable build warning
  cifs: convert tlink_tree to a rbtree
  cifs: store pointer to master tlink in superblock (try #2)
  cifs: trivial doc fix: note setlease implemented
  CIFS: Add cifs_set_oplock_level
  FS: cifs, remove unneeded NULL tests
Linus Torvalds 2010-11-05 14:17:01 -07:00
commit 2e5c36722d
9 changed files with 152 additions and 161 deletions
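
The centerpiece of the series is converting cifs_sb->tlink_tree from a uid-indexed radix tree to an rbtree keyed on tl_uid, and caching a master_tlink pointer in the superblock so the master tcon no longer needs a tagged radix lookup. The sketch below mirrors the tlink_rb_insert() hunk in the connect.c diff, with comments added here to spell out the design point; it is an editorial illustration assuming a kernel build context, not part of the commit. Because rb_link_node() and rb_insert_color() never allocate memory, the insert can run entirely under tlink_tree_lock, which is why the radix_tree_preload() setup and its error paths disappear from cifs_mount() and cifs_sb_tlink().

/*
 * Editorial sketch (kernel context assumed): uid-keyed rb-tree insert
 * mirroring tlink_rb_insert() below; the struct is trimmed to the
 * fields relevant to the tree.
 */
#include <linux/rbtree.h>
#include <linux/types.h>

struct tcon_link {
	struct rb_node	tl_rbnode;	/* linkage in cifs_sb->tlink_tree */
	uid_t		tl_uid;		/* key: fsuid of the owning task */
	/* ... remaining fields as in cifsglob.h ... */
};

/*
 * Walk down from the root comparing uids to find the insertion point,
 * then splice the new node in and rebalance.  No allocation happens
 * here, so the caller can hold a spinlock across the whole operation.
 */
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct tcon_link *tlink =
			rb_entry(*new, struct tcon_link, tl_rbnode);

		parent = *new;
		if (tlink->tl_uid > new_tlink->tl_uid)
			new = &(*new)->rb_left;
		else
			new = &(*new)->rb_right;
	}
	rb_link_node(&new_tlink->tl_rbnode, parent, new);
	rb_insert_color(&new_tlink->tl_rbnode, root);
}

The lookup side (tlink_rb_search() in the same hunk) is the symmetric descent on tl_uid, returning the matching tcon_link or NULL.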

@@ -81,7 +81,7 @@ u) DOS attrs - returned as pseudo-xattr in Samba format (check VFAT and NTFS for
v) mount check for unmatched uids
w) Add support for new vfs entry points for setlease and fallocate
w) Add support for new vfs entry point for fallocate
x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of
processes can proceed better in parallel (on the server)

@@ -15,7 +15,7 @@
* the GNU Lesser General Public License for more details.
*
*/
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#ifndef _CIFS_FS_SB_H
#define _CIFS_FS_SB_H
@@ -42,9 +42,9 @@
#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
struct cifs_sb_info {
struct radix_tree_root tlink_tree;
#define CIFS_TLINK_MASTER_TAG 0 /* is "master" (mount) tcon */
struct rb_root tlink_tree;
spinlock_t tlink_tree_lock;
struct tcon_link *master_tlink;
struct nls_table *local_nls;
unsigned int rsize;
unsigned int wsize;

@@ -116,7 +116,7 @@ cifs_read_super(struct super_block *sb, void *data,
return -ENOMEM;
spin_lock_init(&cifs_sb->tlink_tree_lock);
INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
cifs_sb->tlink_tree = RB_ROOT;
rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
if (rc) {
@@ -321,8 +321,7 @@ cifs_alloc_inode(struct super_block *sb)
/* Until the file is open and we have gotten oplock
info back from the server, can not assume caching of
file data or metadata */
cifs_inode->clientCanCacheRead = false;
cifs_inode->clientCanCacheAll = false;
cifs_set_oplock_level(cifs_inode, 0);
cifs_inode->delete_pending = false;
cifs_inode->invalid_mapping = false;
cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */

@@ -336,7 +336,8 @@ struct cifsTconInfo {
* "get" on the container.
*/
struct tcon_link {
unsigned long tl_index;
struct rb_node tl_rbnode;
uid_t tl_uid;
unsigned long tl_flags;
#define TCON_LINK_MASTER 0
#define TCON_LINK_PENDING 1

@@ -104,6 +104,7 @@ extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec);
extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
int offset);
extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
struct file *file, struct tcon_link *tlink,

@@ -116,6 +116,7 @@ struct smb_vol {
static int ipv4_connect(struct TCP_Server_Info *server);
static int ipv6_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);
/*
@@ -2900,24 +2901,16 @@ remote_path_check:
goto mount_fail_check;
}
tlink->tl_index = pSesInfo->linux_uid;
tlink->tl_uid = pSesInfo->linux_uid;
tlink->tl_tcon = tcon;
tlink->tl_time = jiffies;
set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rc = radix_tree_preload(GFP_KERNEL);
if (rc == -ENOMEM) {
kfree(tlink);
goto mount_fail_check;
}
cifs_sb->master_tlink = tlink;
spin_lock(&cifs_sb->tlink_tree_lock);
radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
CIFS_TLINK_MASTER_TAG);
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
radix_tree_preload_end();
queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
@@ -3107,32 +3100,25 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
int
cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
{
int i, ret;
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node;
struct tcon_link *tlink;
char *tmp;
struct tcon_link *tlink[8];
unsigned long index = 0;
cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
do {
spin_lock(&cifs_sb->tlink_tree_lock);
ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
(void **)tlink, index,
ARRAY_SIZE(tlink));
/* increment index for next pass */
if (ret > 0)
index = tlink[ret - 1]->tl_index + 1;
for (i = 0; i < ret; i++) {
cifs_get_tlink(tlink[i]);
clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
radix_tree_delete(&cifs_sb->tlink_tree,
tlink[i]->tl_index);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
spin_lock(&cifs_sb->tlink_tree_lock);
while ((node = rb_first(root))) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
cifs_get_tlink(tlink);
clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rb_erase(node, root);
for (i = 0; i < ret; i++)
cifs_put_tlink(tlink[i]);
} while (ret != 0);
spin_unlock(&cifs_sb->tlink_tree_lock);
cifs_put_tlink(tlink);
spin_lock(&cifs_sb->tlink_tree_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
tmp = cifs_sb->prepath;
cifs_sb->prepathlen = 0;
@@ -3271,22 +3257,10 @@ out:
return tcon;
}
static struct tcon_link *
static inline struct tcon_link *
cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
{
struct tcon_link *tlink;
unsigned int ret;
spin_lock(&cifs_sb->tlink_tree_lock);
ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
0, 1, CIFS_TLINK_MASTER_TAG);
spin_unlock(&cifs_sb->tlink_tree_lock);
/* the master tcon should always be present */
if (ret == 0)
BUG();
return tlink;
return cifs_sb->master_tlink;
}
struct cifsTconInfo *
@@ -3302,6 +3276,47 @@ cifs_sb_tcon_pending_wait(void *unused)
return signal_pending(current) ? -ERESTARTSYS : 0;
}
/* find and return a tlink with given uid */
static struct tcon_link *
tlink_rb_search(struct rb_root *root, uid_t uid)
{
struct rb_node *node = root->rb_node;
struct tcon_link *tlink;
while (node) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
if (tlink->tl_uid > uid)
node = node->rb_left;
else if (tlink->tl_uid < uid)
node = node->rb_right;
else
return tlink;
}
return NULL;
}
/* insert a tcon_link into the tree */
static void
tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct tcon_link *tlink;
while (*new) {
tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
parent = *new;
if (tlink->tl_uid > new_tlink->tl_uid)
new = &((*new)->rb_left);
else
new = &((*new)->rb_right);
}
rb_link_node(&new_tlink->tl_rbnode, parent, new);
rb_insert_color(&new_tlink->tl_rbnode, root);
}
/*
* Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
* current task.
@@ -3309,7 +3324,7 @@ cifs_sb_tcon_pending_wait(void *unused)
* If the superblock doesn't refer to a multiuser mount, then just return
* the master tcon for the mount.
*
* First, search the radix tree for an existing tcon for this fsuid. If one
* First, search the rbtree for an existing tcon for this fsuid. If one
* exists, then check to see if it's pending construction. If it is then wait
* for construction to complete. Once it's no longer pending, check to see if
* it failed and either return an error or retry construction, depending on
@@ -3322,14 +3337,14 @@ struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
int ret;
unsigned long fsuid = (unsigned long) current_fsuid();
uid_t fsuid = current_fsuid();
struct tcon_link *tlink, *newtlink;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
spin_lock(&cifs_sb->tlink_tree_lock);
tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
if (tlink)
cifs_get_tlink(tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3338,36 +3353,24 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
if (newtlink == NULL)
return ERR_PTR(-ENOMEM);
newtlink->tl_index = fsuid;
newtlink->tl_uid = fsuid;
newtlink->tl_tcon = ERR_PTR(-EACCES);
set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
cifs_get_tlink(newtlink);
ret = radix_tree_preload(GFP_KERNEL);
if (ret != 0) {
kfree(newtlink);
return ERR_PTR(ret);
}
spin_lock(&cifs_sb->tlink_tree_lock);
/* was one inserted after previous search? */
tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
if (tlink) {
cifs_get_tlink(tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
radix_tree_preload_end();
kfree(newtlink);
goto wait_for_construction;
}
ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
radix_tree_preload_end();
if (ret) {
kfree(newtlink);
return ERR_PTR(ret);
}
tlink = newtlink;
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
} else {
wait_for_construction:
ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3413,39 +3416,39 @@ cifs_prune_tlinks(struct work_struct *work)
{
struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
prune_tlinks.work);
struct tcon_link *tlink[8];
unsigned long now = jiffies;
unsigned long index = 0;
int i, ret;
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node = rb_first(root);
struct rb_node *tmp;
struct tcon_link *tlink;
/*
* Because we drop the spinlock in the loop in order to put the tlink
* it's not guarded against removal of links from the tree. The only
* places that remove entries from the tree are this function and
* umounts. Because this function is non-reentrant and is canceled
* before umount can proceed, this is safe.
*/
spin_lock(&cifs_sb->tlink_tree_lock);
node = rb_first(root);
while (node != NULL) {
tmp = node;
node = rb_next(tmp);
tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
atomic_read(&tlink->tl_count) != 0 ||
time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
continue;
cifs_get_tlink(tlink);
clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
rb_erase(tmp, root);
do {
spin_lock(&cifs_sb->tlink_tree_lock);
ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
(void **)tlink, index,
ARRAY_SIZE(tlink));
/* increment index for next pass */
if (ret > 0)
index = tlink[ret - 1]->tl_index + 1;
for (i = 0; i < ret; i++) {
if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
atomic_read(&tlink[i]->tl_count) != 0 ||
time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
now)) {
tlink[i] = NULL;
continue;
}
cifs_get_tlink(tlink[i]);
clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
radix_tree_delete(&cifs_sb->tlink_tree,
tlink[i]->tl_index);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
for (i = 0; i < ret; i++) {
if (tlink[i] != NULL)
cifs_put_tlink(tlink[i]);
}
} while (ret != 0);
cifs_put_tlink(tlink);
spin_lock(&cifs_sb->tlink_tree_lock);
}
spin_unlock(&cifs_sb->tlink_tree_lock);
queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);

@@ -146,12 +146,7 @@ client_can_cache:
rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
xid, NULL);
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
cFYI(1, "Exclusive Oplock granted on inode %p", inode);
} else if ((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
cifs_set_oplock_level(pCifsInode, oplock);
return rc;
}
@@ -253,12 +248,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
spin_unlock(&cifs_file_list_lock);
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
cFYI(1, "Exclusive Oplock inode %p", inode);
} else if ((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = true;
cifs_set_oplock_level(pCifsInode, oplock);
file->private_data = pCifsFile;
return pCifsFile;
@@ -271,8 +261,9 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
*/
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
struct inode *inode = cifs_file->dentry->d_inode;
struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifsLockInfo *li, *tmp;
spin_lock(&cifs_file_list_lock);
@@ -288,8 +279,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
if (list_empty(&cifsi->openFileList)) {
cFYI(1, "closing last open instance for inode %p",
cifs_file->dentry->d_inode);
cifsi->clientCanCacheRead = false;
cifsi->clientCanCacheAll = false;
cifs_set_oplock_level(cifsi, 0);
}
spin_unlock(&cifs_file_list_lock);
@@ -607,8 +597,6 @@ reopen_success:
rc = filemap_write_and_wait(inode->i_mapping);
mapping_set_error(inode->i_mapping, rc);
pCifsInode->clientCanCacheAll = false;
pCifsInode->clientCanCacheRead = false;
if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode,
full_path, inode->i_sb, xid);
@@ -622,18 +610,9 @@ reopen_success:
invalidate the current end of file on the server
we can not go to the server to get the new inod
info */
if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = true;
pCifsInode->clientCanCacheRead = true;
cFYI(1, "Exclusive Oplock granted on inode %p",
pCifsFile->dentry->d_inode);
} else if ((oplock & 0xF) == OPLOCK_READ) {
pCifsInode->clientCanCacheRead = true;
pCifsInode->clientCanCacheAll = false;
} else {
pCifsInode->clientCanCacheRead = false;
pCifsInode->clientCanCacheAll = false;
}
cifs_set_oplock_level(pCifsInode, oplock);
cifs_relock_file(pCifsFile);
reopen_error_exit:
@@ -775,12 +754,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
if (file->private_data == NULL) {
rc = -EBADF;
FreeXid(xid);
return rc;
}
netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -956,6 +929,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
size_t write_size, loff_t *poffset)
{
struct inode *inode = file->f_path.dentry->d_inode;
int rc = 0;
unsigned int bytes_written = 0;
unsigned int total_written;
@@ -963,7 +937,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
struct cifsTconInfo *pTcon;
int xid, long_op;
struct cifsFileInfo *open_file;
struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
struct cifsInodeInfo *cifsi = CIFS_I(inode);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -1029,21 +1003,17 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data,
cifs_stats_bytes_written(pTcon, total_written);
/* since the write may have blocked check these pointers again */
if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
struct inode *inode = file->f_path.dentry->d_inode;
/* Do not update local mtime - server will set its actual value on write
* inode->i_ctime = inode->i_mtime =
* current_fs_time(inode->i_sb);*/
if (total_written > 0) {
spin_lock(&inode->i_lock);
if (*poffset > file->f_path.dentry->d_inode->i_size)
i_size_write(file->f_path.dentry->d_inode,
*poffset);
spin_unlock(&inode->i_lock);
}
mark_inode_dirty_sync(file->f_path.dentry->d_inode);
* inode->i_ctime = inode->i_mtime =
* current_fs_time(inode->i_sb);*/
if (total_written > 0) {
spin_lock(&inode->i_lock);
if (*poffset > inode->i_size)
i_size_write(inode, *poffset);
spin_unlock(&inode->i_lock);
}
mark_inode_dirty_sync(inode);
FreeXid(xid);
return total_written;
}
@@ -1178,7 +1148,7 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
bool fsuid_only)
{
struct cifsFileInfo *open_file;
struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
struct cifs_sb_info *cifs_sb;
bool any_available = false;
int rc;
@@ -1192,6 +1162,8 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
return NULL;
}
cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
/* only filter by fsuid on multiuser mounts */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
fsuid_only = false;

@@ -63,8 +63,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
#ifdef CONFIG_CIFS_POSIX
case FS_IOC_GETFLAGS:
if (CIFS_UNIX_EXTATTR_CAP & caps) {
if (pSMBFile == NULL)
break;
rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
&ExtAttrBits, &ExtAttrMask);
if (rc == 0)
@@ -80,8 +78,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
rc = -EFAULT;
break;
}
if (pSMBFile == NULL)
break;
/* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
extAttrBits, &ExtAttrMask);*/
}

@@ -569,10 +569,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
cFYI(1, "file id match, oplock break");
pCifsInode = CIFS_I(netfile->dentry->d_inode);
pCifsInode->clientCanCacheAll = false;
if (pSMB->OplockLevel == 0)
pCifsInode->clientCanCacheRead = false;
cifs_set_oplock_level(pCifsInode,
pSMB->OplockLevel);
/*
* cifs_oplock_break_put() can't be called
* from here. Get reference after queueing
@@ -722,3 +721,23 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
cifs_sb_master_tcon(cifs_sb)->treeName);
}
}
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
oplock &= 0xF;
if (oplock == OPLOCK_EXCLUSIVE) {
cinode->clientCanCacheAll = true;
cinode->clientCanCacheRead = true;
cFYI(1, "Exclusive Oplock granted on inode %p",
&cinode->vfs_inode);
} else if (oplock == OPLOCK_READ) {
cinode->clientCanCacheAll = false;
cinode->clientCanCacheRead = true;
cFYI(1, "Level II Oplock granted on inode %p",
&cinode->vfs_inode);
} else {
cinode->clientCanCacheAll = false;
cinode->clientCanCacheRead = false;
}
}
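
For reference, the mapping the new helper centralizes is: an exclusive oplock lets the client cache both data and metadata, a level II (read) oplock allows read caching only, and anything else disables caching. A small standalone userspace mock of that truth table is sketched below; the MOCK_OPLOCK_* values and the struct are simplified placeholders, not the kernel definitions, and the cFYI() logging is omitted.

/* Editorial illustration only -- placeholder constants and a mock struct,
 * not the kernel's OPLOCK_* values or struct cifsInodeInfo. */
#include <stdbool.h>
#include <stdio.h>

#define MOCK_OPLOCK_EXCLUSIVE 1
#define MOCK_OPLOCK_READ      2

struct mock_cifs_inode {
	bool clientCanCacheAll;	 /* data and metadata may be cached */
	bool clientCanCacheRead; /* read caching only */
};

/* same truth table as cifs_set_oplock_level(), minus the logging */
static void mock_set_oplock_level(struct mock_cifs_inode *ci, unsigned oplock)
{
	oplock &= 0xF;
	ci->clientCanCacheAll  = (oplock == MOCK_OPLOCK_EXCLUSIVE);
	ci->clientCanCacheRead = (oplock == MOCK_OPLOCK_EXCLUSIVE ||
				  oplock == MOCK_OPLOCK_READ);
}

int main(void)
{
	struct mock_cifs_inode ci;

	mock_set_oplock_level(&ci, MOCK_OPLOCK_EXCLUSIVE);
	printf("exclusive: all=%d read=%d\n",
	       ci.clientCanCacheAll, ci.clientCanCacheRead);

	mock_set_oplock_level(&ci, MOCK_OPLOCK_READ);
	printf("read:      all=%d read=%d\n",
	       ci.clientCanCacheAll, ci.clientCanCacheRead);

	mock_set_oplock_level(&ci, 0);
	printf("none:      all=%d read=%d\n",
	       ci.clientCanCacheAll, ci.clientCanCacheRead);
	return 0;
}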