author    Trond Myklebust <Trond.Myklebust@netapp.com>    2007-02-03 13:38:41 -0800
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2007-02-03 15:35:03 -0800
commit    2efef837fb84f78cee7439804cb3722bffc64e75 (patch)
tree      b69166832927f2141c4173cac456747605ea6252 /net/sunrpc
parent    54cc533aaa0dc331ad126f0aacfb19572adee638 (diff)
RPC: Clean up rpc_execute...
The error values are already propagated through task->tk_status, and none of the callers check one without checking the other, so we can drop the return value.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/clnt.c   | 14
-rw-r--r--  net/sunrpc/sched.c  | 12
2 files changed, 10 insertions(+), 16 deletions(-)
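To illustrate the new contract, here is a minimal sketch of the synchronous caller pattern that the rpc_call_sync() hunk below adopts: the result is read from task->tk_status, since rpc_execute() no longer returns one. The helper name run_task_sync() and the assumption that the caller already holds a freshly created rpc_task are illustrative, not part of the patch.

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Illustrative helper (not from the patch): run an existing task
 * synchronously and return its result.  rpc_execute() is now void,
 * so the outcome lives only in task->tk_status. */
static int run_task_sync(struct rpc_task *task, struct rpc_message *msg)
{
	int status;

	rpc_call_setup(task, msg, 0);
	if (task->tk_status == 0) {
		/* Extra reference so the task outlives rpc_release_task()
		 * at the end of __rpc_execute() and tk_status can still
		 * be read here. */
		atomic_inc(&task->tk_count);
		rpc_execute(task);
	}
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}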
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 16c9fbc1db69..e9d5f3c562e5 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -486,17 +486,13 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
 	rpc_task_sigmask(task, &oldset);
 
-	rpc_call_setup(task, msg, 0);
-
 	/* Set up the call info struct and execute the task */
+	rpc_call_setup(task, msg, 0);
+	if (task->tk_status == 0) {
+		atomic_inc(&task->tk_count);
+		rpc_execute(task);
+	}
 	status = task->tk_status;
-	if (status != 0)
-		goto out;
-	atomic_inc(&task->tk_count);
-	status = rpc_execute(task);
-	if (status == 0)
-		status = task->tk_status;
-out:
 	rpc_put_task(task);
 	rpc_restore_sigmask(&oldset);
 	return status;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fc083f0b3544..13ab0c6fed01 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -625,7 +625,7 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
 /*
  * This is the RPC `scheduler' (or rather, the finite state machine).
  */
-static int __rpc_execute(struct rpc_task *task)
+static void __rpc_execute(struct rpc_task *task)
 {
 	int status = 0;
 
@@ -679,9 +679,9 @@ static int __rpc_execute(struct rpc_task *task)
 		if (RPC_IS_ASYNC(task)) {
 			/* Careful! we may have raced... */
 			if (RPC_IS_QUEUED(task))
-				return 0;
+				return;
 			if (rpc_test_and_set_running(task))
-				return 0;
+				return;
 			continue;
 		}
 
@@ -710,7 +710,6 @@ static int __rpc_execute(struct rpc_task *task)
 	dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
 	/* Release all resources associated with the task */
 	rpc_release_task(task);
-	return status;
 }
 
 /*
@@ -722,12 +721,11 @@ static int __rpc_execute(struct rpc_task *task)
  * released. In particular note that tk_release() will have
  * been called, so your task memory may have been freed.
  */
-int
-rpc_execute(struct rpc_task *task)
+void rpc_execute(struct rpc_task *task)
 {
 	rpc_set_active(task);
 	rpc_set_running(task);
-	return __rpc_execute(task);
+	__rpc_execute(task);
 }
 
 static void rpc_async_schedule(struct work_struct *work)
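For asynchronous tasks, which rpc_async_schedule() above drives through the same __rpc_execute() path, the outcome has always been delivered via the rpc_call_ops completion callback rather than a return value, which is why nothing ever looked at what rpc_execute() returned. Below is a minimal sketch of that caller pattern, assuming the usual rpc_call_ops interface of this era; my_call_done and my_call_ops are illustrative names, not part of the patch.

#include <linux/kernel.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Illustrative completion callback (not from the patch): an async caller
 * learns the result here, from task->tk_status. */
static void my_call_done(struct rpc_task *task, void *calldata)
{
	if (task->tk_status < 0)
		printk(KERN_DEBUG "RPC call failed: %d\n", task->tk_status);
}

static const struct rpc_call_ops my_call_ops = {
	.rpc_call_done	= my_call_done,
};

Such a caller would start the task with something like rpc_call_async(clnt, msg, 0, &my_call_ops, NULL); any setup failure is reported by that call itself, and the RPC result arrives only through the callback above.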