NFS: Switch from intr mount option to TASK_KILLABLE

By using the TASK_KILLABLE infrastructure, we can get rid of the 'intr'
mount option.  We have to use _killable everywhere instead of _interruptible
as we get rid of rpc_clnt_sigmask/sigunmask.

Signed-off-by: Liam R. Howlett <howlett@gmail.com>
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Matthew Wilcox 2007-12-06 16:24:39 -05:00 committed by Matthew Wilcox
parent 009e577e07
commit 150030b78a
19 changed files with 30 additions and 147 deletions
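
The conversion pattern, in a nutshell: a task sleeping in TASK_KILLABLE is woken
by the event it is waiting for or by a fatal signal (one that will kill the
process, e.g. an unhandled SIGKILL), but not by ordinary signals. That is what
makes the rpc_clnt_sigmask()/rpc_clnt_sigunmask() juggling, and with it the
'intr' mount option, unnecessary. A minimal sketch of the idiom (my_wq,
my_event_done and my_wait_for_event are hypothetical names; wait_event_killable()
and TASK_KILLABLE are the real interfaces this series introduces):

	#include <linux/wait.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* hypothetical example state */
	static bool my_event_done;

	static int my_wait_for_event(void)
	{
		/* Sleeps in TASK_KILLABLE: woken by wake_up(&my_wq) once
		 * my_event_done is set, or by a fatal signal.  Ordinary
		 * signals no longer wake the sleeper, so no sigmask games
		 * are needed around the wait.
		 */
		int err = wait_event_killable(my_wq, my_event_done);

		if (err)	/* -ERESTARTSYS: fatal signal pending */
			return err;
		return 0;
	}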

fs/nfs/client.c

@@ -302,7 +302,7 @@ found_client:
 	if (new)
 		nfs_free_client(new);
 
-	error = wait_event_interruptible(nfs_client_active_wq,
+	error = wait_event_killable(nfs_client_active_wq,
 			clp->cl_cons_state != NFS_CS_INITING);
 	if (error < 0) {
 		nfs_put_client(clp);
@@ -494,10 +494,6 @@ static int nfs_init_server_rpcclient(struct nfs_server *server, rpc_authflavor_t
 	if (server->flags & NFS_MOUNT_SOFT)
 		server->client->cl_softrtry = 1;
-	server->client->cl_intr = 0;
-	if (server->flags & NFS4_MOUNT_INTR)
-		server->client->cl_intr = 1;
 	return 0;
 }

fs/nfs/direct.c

@@ -193,7 +193,7 @@ static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
 	if (dreq->iocb)
 		goto out;
 
-	result = wait_for_completion_interruptible(&dreq->completion);
+	result = wait_for_completion_killable(&dreq->completion);
 
 	if (!result)
 		result = dreq->error;
@@ -391,9 +391,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos)
 {
 	ssize_t result = 0;
-	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
-	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_direct_req *dreq;
 
 	dreq = nfs_direct_req_alloc();
@@ -405,11 +403,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
-	rpc_clnt_sigmask(clnt, &oldset);
 	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
 	if (!result)
 		result = nfs_direct_wait(dreq);
-	rpc_clnt_sigunmask(clnt, &oldset);
 	nfs_direct_req_release(dreq);
 
 	return result;
@@ -767,9 +763,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 		size_t count)
 {
 	ssize_t result = 0;
-	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
-	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_direct_req *dreq;
 	size_t wsize = NFS_SERVER(inode)->wsize;
 	int sync = 0;
@@ -787,11 +781,9 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
-	rpc_clnt_sigmask(clnt, &oldset);
 	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
 	if (!result)
 		result = nfs_direct_wait(dreq);
-	rpc_clnt_sigunmask(clnt, &oldset);
 	nfs_direct_req_release(dreq);
 
 	return result;

fs/nfs/inode.c

@@ -433,15 +433,11 @@ static int nfs_wait_schedule(void *word)
  */
 static int nfs_wait_on_inode(struct inode *inode)
 {
-	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
-	sigset_t oldmask;
 	int error;
 
-	rpc_clnt_sigmask(clnt, &oldmask);
-	error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
-					nfs_wait_schedule, TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldmask);
+	error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING,
+					nfs_wait_schedule, TASK_KILLABLE);
 
 	return error;
 }

fs/nfs/mount_clnt.c

@@ -56,7 +56,7 @@ int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path,
 		.program	= &mnt_program,
 		.version	= version,
 		.authflavor	= RPC_AUTH_UNIX,
-		.flags		= RPC_CLNT_CREATE_INTR,
+		.flags		= 0,
 	};
 	struct rpc_clnt	*mnt_clnt;
 	int		status;

fs/nfs/nfs3proc.c

@@ -27,17 +27,14 @@
 static int
 nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 {
-	sigset_t oldset;
 	int res;
 
-	rpc_clnt_sigmask(clnt, &oldset);
 	do {
 		res = rpc_call_sync(clnt, msg, flags);
 		if (res != -EJUKEBOX)
 			break;
-		schedule_timeout_interruptible(NFS_JUKEBOX_RETRY_TIME);
+		schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
 		res = -ERESTARTSYS;
-	} while (!signalled());
-	rpc_clnt_sigunmask(clnt, &oldset);
+	} while (!fatal_signal_pending(current));
 	return res;
 }
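
The wrapper above is the general shape of a killable retry loop: sleep with
schedule_timeout_killable(), which waits in TASK_KILLABLE for the timeout, and
give up only when fatal_signal_pending() says the process is being killed. A
condensed sketch (try_once() and RETRY_JIFFIES are hypothetical stand-ins;
schedule_timeout_killable() and fatal_signal_pending() are the real helpers
used in the hunk above):

	#include <linux/sched.h>
	#include <linux/errno.h>

	#define RETRY_JIFFIES	(5 * HZ)	/* hypothetical retry interval */

	static int retry_until_fatal(int (*try_once)(void))
	{
		int res;

		do {
			res = try_once();
			if (res != -EAGAIN)	/* hypothetical "retry" code */
				break;
			/* Waits in TASK_KILLABLE; returns early if killed */
			schedule_timeout_killable(RETRY_JIFFIES);
			res = -ERESTARTSYS;
		} while (!fatal_signal_pending(current));

		return res;
	}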

fs/nfs/nfs4proc.c

@@ -316,12 +316,9 @@ static void nfs4_opendata_put(struct nfs4_opendata *p)
 
 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
 {
-	sigset_t oldset;
 	int ret;
 
-	rpc_clnt_sigmask(task->tk_client, &oldset);
 	ret = rpc_wait_for_completion_task(task);
-	rpc_clnt_sigunmask(task->tk_client, &oldset);
 	return ret;
 }
@@ -2806,9 +2803,9 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
 	return 0;
 }
 
-static int nfs4_wait_bit_interruptible(void *word)
+static int nfs4_wait_bit_killable(void *word)
 {
-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
 	schedule();
 	return 0;
@@ -2816,18 +2813,14 @@ static int nfs4_wait_bit_interruptible(void *word)
 
 static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 {
-	sigset_t oldset;
 	int res;
 
 	might_sleep();
 
 	rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_);
 
-	rpc_clnt_sigmask(clnt, &oldset);
-	res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
-			nfs4_wait_bit_interruptible,
-			TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldset);
+	res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER,
+			nfs4_wait_bit_killable, TASK_KILLABLE);
 
 	rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_);
 	return res;
@@ -2835,7 +2828,6 @@ static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp)
 
 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 {
-	sigset_t oldset;
 	int res = 0;
 
 	might_sleep();
@@ -2844,14 +2836,9 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
 		*timeout = NFS4_POLL_RETRY_MIN;
 	if (*timeout > NFS4_POLL_RETRY_MAX)
 		*timeout = NFS4_POLL_RETRY_MAX;
-	rpc_clnt_sigmask(clnt, &oldset);
-	if (clnt->cl_intr) {
-		schedule_timeout_interruptible(*timeout);
-		if (signalled())
-			res = -ERESTARTSYS;
-	} else
-		schedule_timeout_uninterruptible(*timeout);
-	rpc_clnt_sigunmask(clnt, &oldset);
+	schedule_timeout_killable(*timeout);
+	if (fatal_signal_pending(current))
+		res = -ERESTARTSYS;
 	*timeout <<= 1;
 	return res;
 }
@@ -3085,7 +3072,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
 static unsigned long
 nfs4_set_lock_task_retry(unsigned long timeout)
 {
-	schedule_timeout_interruptible(timeout);
+	schedule_timeout_killable(timeout);
 	timeout <<= 1;
 	if (timeout > NFS4_LOCK_MAXTIMEOUT)
 		return NFS4_LOCK_MAXTIMEOUT;

fs/nfs/nfsroot.c

@@ -228,10 +228,7 @@ static int __init root_nfs_parse(char *name, char *buf)
 			nfs_data.flags &= ~NFS_MOUNT_SOFT;
 			break;
 		case Opt_intr:
-			nfs_data.flags |= NFS_MOUNT_INTR;
-			break;
 		case Opt_nointr:
-			nfs_data.flags &= ~NFS_MOUNT_INTR;
 			break;
 		case Opt_posix:
 			nfs_data.flags |= NFS_MOUNT_POSIX;

fs/nfs/pagelist.c

@@ -67,7 +67,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
 		if (req != NULL)
 			break;
 
-		if (signalled() && (server->flags & NFS_MOUNT_INTR))
+		if (fatal_signal_pending(current))
 			return ERR_PTR(-ERESTARTSYS);
 		yield();
 	}
@@ -175,11 +175,11 @@ void nfs_release_request(struct nfs_page *req)
 	kref_put(&req->wb_kref, nfs_free_request);
 }
 
-static int nfs_wait_bit_interruptible(void *word)
+static int nfs_wait_bit_killable(void *word)
 {
 	int ret = 0;
 
-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		ret = -ERESTARTSYS;
 	else
 		schedule();
@@ -190,26 +190,18 @@ static int nfs_wait_bit_interruptible(void *word)
  * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-	struct rpc_clnt	*clnt = NFS_CLIENT(req->wb_context->path.dentry->d_inode);
-	sigset_t oldmask;
 	int ret = 0;
 
 	if (!test_bit(PG_BUSY, &req->wb_flags))
 		goto out;
-	/*
-	 * Note: the call to rpc_clnt_sigmask() suffices to ensure that we
-	 *	 are not interrupted if intr flag is not set
-	 */
-	rpc_clnt_sigmask(clnt, &oldmask);
-	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
-			nfs_wait_bit_interruptible, TASK_INTERRUPTIBLE);
-	rpc_clnt_sigunmask(clnt, &oldmask);
+	ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
+			nfs_wait_bit_killable, TASK_KILLABLE);
out:
 	return ret;
 }

fs/nfs/read.c

@@ -212,12 +212,7 @@ nfs_async_read_error(struct list_head *head)
  */
 static void nfs_execute_read(struct nfs_read_data *data)
 {
-	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
-	sigset_t oldset;
-
-	rpc_clnt_sigmask(clnt, &oldset);
 	rpc_execute(&data->task);
-	rpc_clnt_sigunmask(clnt, &oldset);
 }
 
 /*

fs/nfs/super.c

@@ -424,7 +424,6 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
 		const char *nostr;
 	} nfs_info[] = {
 		{ NFS_MOUNT_SOFT, ",soft", ",hard" },
-		{ NFS_MOUNT_INTR, ",intr", ",nointr" },
 		{ NFS_MOUNT_NOCTO, ",nocto", "" },
 		{ NFS_MOUNT_NOAC, ",noac", "" },
 		{ NFS_MOUNT_NONLM, ",nolock", "" },
@@ -624,10 +623,7 @@ static int nfs_parse_mount_options(char *raw,
 			mnt->flags &= ~NFS_MOUNT_SOFT;
 			break;
 		case Opt_intr:
-			mnt->flags |= NFS_MOUNT_INTR;
-			break;
 		case Opt_nointr:
-			mnt->flags &= ~NFS_MOUNT_INTR;
 			break;
 		case Opt_posix:
 			mnt->flags |= NFS_MOUNT_POSIX;

fs/nfs/write.c

@@ -490,7 +490,7 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 /*
 * Wait for a request to complete.
 *
- * Interruptible by signals only if mounted with intr flag.
+ * Interruptible by fatal signals only.
 */
 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
 {
@@ -816,12 +816,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
 
 static void nfs_execute_write(struct nfs_write_data *data)
 {
-	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
-	sigset_t oldset;
-
-	rpc_clnt_sigmask(clnt, &oldset);
 	rpc_execute(&data->task);
-	rpc_clnt_sigunmask(clnt, &oldset);
 }
 
 /*

include/linux/nfs_fs.h

@@ -516,14 +516,7 @@ extern void * nfs_root_data(void);
 #define nfs_wait_event(clnt, wq, condition)				\
 ({									\
-	int __retval = 0;						\
-	if (clnt->cl_intr) {						\
-		sigset_t oldmask;					\
-		rpc_clnt_sigmask(clnt, &oldmask);			\
-		__retval = wait_event_interruptible(wq, condition);	\
-		rpc_clnt_sigunmask(clnt, &oldmask);			\
-	} else								\
-		wait_event(wq, condition);				\
+	int __retval = wait_event_killable(wq, condition);		\
 	__retval;							\
 })

include/linux/nfs_mount.h

@@ -48,7 +48,7 @@ struct nfs_mount_data {
 /* bits in the flags field */
 #define NFS_MOUNT_SOFT		0x0001	/* 1 */
-#define NFS_MOUNT_INTR		0x0002	/* 1 */
+#define NFS_MOUNT_INTR		0x0002	/* 1 */ /* now unused, but ABI */
 #define NFS_MOUNT_SECURE	0x0004	/* 1 */
 #define NFS_MOUNT_POSIX		0x0008	/* 1 */
 #define NFS_MOUNT_NOCTO		0x0010	/* 1 */

include/linux/sunrpc/clnt.h

@@ -41,7 +41,6 @@ struct rpc_clnt {
 	struct rpc_iostats *	cl_metrics;	/* per-client statistics */
 
 	unsigned int		cl_softrtry : 1,/* soft timeouts */
-				cl_intr     : 1,/* interruptible */
 				cl_discrtry : 1,/* disconnect before retry */
 				cl_autobind : 1;/* use getport() */
@@ -109,7 +108,6 @@
 /* Values for "flags" field */
 #define RPC_CLNT_CREATE_HARDRTRY	(1UL << 0)
-#define RPC_CLNT_CREATE_INTR		(1UL << 1)
 #define RPC_CLNT_CREATE_AUTOBIND	(1UL << 2)
 #define RPC_CLNT_CREATE_NONPRIVPORT	(1UL << 3)
 #define RPC_CLNT_CREATE_NOPING		(1UL << 4)

include/linux/sunrpc/sched.h

@@ -128,7 +128,6 @@ struct rpc_call_ops {
 #define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
 #define RPC_TASK_KILLED		0x0100		/* task was killed */
 #define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
-#define RPC_TASK_NOINTR		0x0400		/* uninterruptible task */
 
 #define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
@@ -136,7 +135,6 @@ struct rpc_call_ops {
 #define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
 #define RPC_DO_CALLBACK(t)	((t)->tk_callback != NULL)
 #define RPC_IS_SOFT(t)		((t)->tk_flags & RPC_TASK_SOFT)
-#define RPC_TASK_UNINTERRUPTIBLE(t)	((t)->tk_flags & RPC_TASK_NOINTR)
 
 #define RPC_TASK_RUNNING	0
 #define RPC_TASK_QUEUED		1

net/sunrpc/clnt.c

@@ -281,7 +281,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 		return clnt;
 
 	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
-		int err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+		int err = rpc_ping(clnt, RPC_TASK_SOFT);
 		if (err != 0) {
 			rpc_shutdown_client(clnt);
 			return ERR_PTR(err);
@@ -292,8 +292,6 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
 	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
 		clnt->cl_softrtry = 0;
-	if (args->flags & RPC_CLNT_CREATE_INTR)
-		clnt->cl_intr = 1;
 	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
 		clnt->cl_autobind = 1;
 	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
@@ -459,7 +457,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
 	clnt->cl_prog     = program->number;
 	clnt->cl_vers     = version->number;
 	clnt->cl_stats    = program->stats;
-	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
+	err = rpc_ping(clnt, RPC_TASK_SOFT);
 	if (err != 0) {
 		rpc_shutdown_client(clnt);
 		clnt = ERR_PTR(err);
@@ -480,44 +478,6 @@ static const struct rpc_call_ops rpc_default_ops = {
 	.rpc_call_done = rpc_default_callback,
 };
 
-/*
- * Export the signal mask handling for synchronous code that
- * sleeps on RPC calls
- */
-#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
-
-static void rpc_save_sigmask(sigset_t *oldset, int intr)
-{
-	unsigned long	sigallow = sigmask(SIGKILL);
-	sigset_t sigmask;
-
-	/* Block all signals except those listed in sigallow */
-	if (intr)
-		sigallow |= RPC_INTR_SIGNALS;
-	siginitsetinv(&sigmask, sigallow);
-	sigprocmask(SIG_BLOCK, &sigmask, oldset);
-}
-
-static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
-{
-	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
-}
-
-static inline void rpc_restore_sigmask(sigset_t *oldset)
-{
-	sigprocmask(SIG_SETMASK, oldset, NULL);
-}
-
-void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
-	rpc_save_sigmask(oldset, clnt->cl_intr);
-}
-
-void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
-{
-	rpc_restore_sigmask(oldset);
-}
-
 static
 struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
 				 struct rpc_message *msg,
@@ -526,7 +486,6 @@ struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
 			 void *data)
 {
 	struct rpc_task *task, *ret;
-	sigset_t oldset;
 
 	task = rpc_new_task(clnt, flags, ops, data);
 	if (task == NULL) {
@@ -535,7 +494,6 @@ struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
 	}
 
 	/* Mask signals on synchronous RPC calls and RPCSEC_GSS upcalls */
-	rpc_task_sigmask(task, &oldset);
 	if (msg != NULL) {
 		rpc_call_setup(task, msg, 0);
 		if (task->tk_status != 0) {
@@ -548,7 +506,6 @@ struct rpc_task *rpc_do_run_task(struct rpc_clnt *clnt,
 	rpc_execute(task);
 	ret = task;
out:
-	rpc_restore_sigmask(&oldset);
 	return ret;
 }

net/sunrpc/rpcb_clnt.c

@@ -172,8 +172,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
 		.program	= &rpcb_program,
 		.version	= version,
 		.authflavor	= RPC_AUTH_UNIX,
-		.flags		= (RPC_CLNT_CREATE_NOPING |
-				   RPC_CLNT_CREATE_INTR),
+		.flags		= RPC_CLNT_CREATE_NOPING,
 	};
 
 	switch (srvaddr->sa_family) {

net/sunrpc/sched.c

@@ -245,9 +245,9 @@ void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 }
 EXPORT_SYMBOL(rpc_init_wait_queue);
 
-static int rpc_wait_bit_interruptible(void *word)
+static int rpc_wait_bit_killable(void *word)
 {
-	if (signal_pending(current))
+	if (fatal_signal_pending(current))
 		return -ERESTARTSYS;
 	schedule();
 	return 0;
@@ -299,9 +299,9 @@ static void rpc_mark_complete_task(struct rpc_task *task)
 int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
 {
 	if (action == NULL)
-		action = rpc_wait_bit_interruptible;
+		action = rpc_wait_bit_killable;
 	return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
-			action, TASK_INTERRUPTIBLE);
+			action, TASK_KILLABLE);
 }
 EXPORT_SYMBOL(__rpc_wait_for_completion_task);
@@ -690,10 +690,9 @@ static void __rpc_execute(struct rpc_task *task)
 		/* sync task: sleep here */
 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
-		/* Note: Caller should be using rpc_clnt_sigmask() */
-		status = out_of_line_wait_on_bit(&task->tk_runstate,
-				RPC_TASK_QUEUED, rpc_wait_bit_interruptible,
-				TASK_INTERRUPTIBLE);
+		status = out_of_line_wait_on_bit(&task->tk_runstate,
+				RPC_TASK_QUEUED, rpc_wait_bit_killable,
+				TASK_KILLABLE);
 		if (status == -ERESTARTSYS) {
 			/*
 			 * When a sync task receives a signal, it exits with
@@ -837,8 +836,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons
 		kref_get(&clnt->cl_kref);
 		if (clnt->cl_softrtry)
 			task->tk_flags |= RPC_TASK_SOFT;
-		if (!clnt->cl_intr)
-			task->tk_flags |= RPC_TASK_NOINTR;
 	}
 
 	BUG_ON(task->tk_ops == NULL);

net/sunrpc/sunrpc_syms.c

@@ -38,8 +38,6 @@ EXPORT_SYMBOL(rpc_killall_tasks);
 EXPORT_SYMBOL(rpc_call_sync);
 EXPORT_SYMBOL(rpc_call_async);
 EXPORT_SYMBOL(rpc_call_setup);
-EXPORT_SYMBOL(rpc_clnt_sigmask);
-EXPORT_SYMBOL(rpc_clnt_sigunmask);
 EXPORT_SYMBOL(rpc_delay);
 EXPORT_SYMBOL(rpc_restart_call);
 EXPORT_SYMBOL(rpc_setbufsize);