author     Ingo Molnar <mingo@elte.hu>    2009-03-30 23:53:32 +0200
committer  Ingo Molnar <mingo@elte.hu>    2009-03-30 23:53:32 +0200
commit     65fb0d23fcddd8697c871047b700c78817bdaa43 (patch)
tree       119e6e5f276622c4c862f6c9b6d795264ba1603a /net/sunrpc
parent     8c083f081d0014057901c68a0a3e0f8ca7ac8d23 (diff)
parent     dfbbe89e197a77f2c8046a51c74e33e35f878080 (diff)
Merge branch 'linus' into cpumask-for-linus
Conflicts: arch/x86/kernel/cpu/common.c
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/rpc_pipe.c  |  2
-rw-r--r--  net/sunrpc/sched.c     | 33
-rw-r--r--  net/sunrpc/xprt.c      |  2
-rw-r--r--  net/sunrpc/xprtsock.c  | 76
4 files changed, 61 insertions(+), 52 deletions(-)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 577385a4a5dc..9ced0628d69c 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -480,7 +480,7 @@ static int rpc_delete_dentry(struct dentry *dentry)
return 1;
}
-static struct dentry_operations rpc_dentry_operations = {
+static const struct dentry_operations rpc_dentry_operations = {
.d_delete = rpc_delete_dentry,
};
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 385f427bedad..ff50a0546865 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -293,11 +293,6 @@ static void rpc_make_runnable(struct rpc_task *task)
rpc_clear_queued(task);
if (rpc_test_and_set_running(task))
return;
- /* We might have raced */
- if (RPC_IS_QUEUED(task)) {
- rpc_clear_running(task);
- return;
- }
if (RPC_IS_ASYNC(task)) {
int status;
@@ -607,7 +602,9 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
*/
static void __rpc_execute(struct rpc_task *task)
{
- int status = 0;
+ struct rpc_wait_queue *queue;
+ int task_is_async = RPC_IS_ASYNC(task);
+ int status = 0;
dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
task->tk_pid, task->tk_flags);
@@ -647,15 +644,25 @@ static void __rpc_execute(struct rpc_task *task)
*/
if (!RPC_IS_QUEUED(task))
continue;
- rpc_clear_running(task);
- if (RPC_IS_ASYNC(task)) {
- /* Careful! we may have raced... */
- if (RPC_IS_QUEUED(task))
- return;
- if (rpc_test_and_set_running(task))
- return;
+ /*
+ * The queue->lock protects against races with
+ * rpc_make_runnable().
+ *
+ * Note that once we clear RPC_TASK_RUNNING on an asynchronous
+ * rpc_task, rpc_make_runnable() can assign it to a
+ * different workqueue. We therefore cannot assume that the
+ * rpc_task pointer may still be dereferenced.
+ */
+ queue = task->tk_waitqueue;
+ spin_lock_bh(&queue->lock);
+ if (!RPC_IS_QUEUED(task)) {
+ spin_unlock_bh(&queue->lock);
continue;
}
+ rpc_clear_running(task);
+ spin_unlock_bh(&queue->lock);
+ if (task_is_async)
+ return;
/* sync task: sleep here */
dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 29e401bb612e..62098d101a1f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -663,7 +663,7 @@ void xprt_connect(struct rpc_task *task)
xprt, (xprt_connected(xprt) ? "is" : "is not"));
if (!xprt_bound(xprt)) {
- task->tk_status = -EIO;
+ task->tk_status = -EAGAIN;
return;
}
if (!xprt_lock_write(xprt, task))
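[Editor's note on the xprt.c hunk above: switching the unbound-transport status from -EIO to -EAGAIN turns "no port yet" from a fatal error into a transient one, on the assumption that the RPC state machine retries -EAGAIN once rpcbind has supplied a port instead of aborting the task. A trivial user-space illustration of that fatal-versus-transient distinction follows; the names and the retry loop are hypothetical, not the kernel's.]

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool bound = false;		/* stand-in for xprt_bound(xprt) */

	static int try_connect(void)
	{
		if (!bound)
			return -EAGAIN;		/* transient: not bound yet */
		return 0;
	}

	int main(void)
	{
		for (int attempt = 0; attempt < 3; attempt++) {
			int status = try_connect();

			if (status == 0) {
				puts("connected");
				return 0;
			}
			if (status == -EAGAIN) {
				puts("not bound yet, retrying");
				bound = true;	/* pretend rpcbind completed */
				continue;
			}
			puts("fatal error, giving up");	/* e.g. -EIO */
			return 1;
		}
		return 1;
	}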
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 5cbb404c4cdf..568330eebbfe 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -467,7 +467,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
int err, sent = 0;
if (unlikely(!sock))
- return -ENOTCONN;
+ return -ENOTSOCK;
clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
if (base != 0) {
@@ -577,6 +577,8 @@ static int xs_udp_send_request(struct rpc_task *task)
req->rq_svec->iov_base,
req->rq_svec->iov_len);
+ if (!xprt_bound(xprt))
+ return -ENOTCONN;
status = xs_sendpages(transport->sock,
xs_addr(xprt),
xprt->addrlen, xdr,
@@ -594,6 +596,10 @@ static int xs_udp_send_request(struct rpc_task *task)
}
switch (status) {
+ case -ENOTSOCK:
+ status = -ENOTCONN;
+ /* Should we call xs_close() here? */
+ break;
case -EAGAIN:
xs_nospace(task);
break;
@@ -693,6 +699,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
}
switch (status) {
+ case -ENOTSOCK:
+ status = -ENOTCONN;
+ /* Should we call xs_close() here? */
+ break;
case -EAGAIN:
xs_nospace(task);
break;
@@ -1215,6 +1225,23 @@ out:
read_unlock(&sk->sk_callback_lock);
}
+static void xs_write_space(struct sock *sk)
+{
+ struct socket *sock;
+ struct rpc_xprt *xprt;
+
+ if (unlikely(!(sock = sk->sk_socket)))
+ return;
+ clear_bit(SOCK_NOSPACE, &sock->flags);
+
+ if (unlikely(!(xprt = xprt_from_sock(sk))))
+ return;
+ if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
+ return;
+
+ xprt_write_space(xprt);
+}
+
/**
* xs_udp_write_space - callback invoked when socket buffer space
* becomes available
@@ -1230,23 +1257,9 @@ static void xs_udp_write_space(struct sock *sk)
read_lock(&sk->sk_callback_lock);
/* from net/core/sock.c:sock_def_write_space */
- if (sock_writeable(sk)) {
- struct socket *sock;
- struct rpc_xprt *xprt;
-
- if (unlikely(!(sock = sk->sk_socket)))
- goto out;
- clear_bit(SOCK_NOSPACE, &sock->flags);
-
- if (unlikely(!(xprt = xprt_from_sock(sk))))
- goto out;
- if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
- goto out;
+ if (sock_writeable(sk))
+ xs_write_space(sk);
- xprt_write_space(xprt);
- }
-
- out:
read_unlock(&sk->sk_callback_lock);
}
@@ -1265,23 +1278,9 @@ static void xs_tcp_write_space(struct sock *sk)
read_lock(&sk->sk_callback_lock);
/* from net/core/stream.c:sk_stream_write_space */
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
- struct socket *sock;
- struct rpc_xprt *xprt;
-
- if (unlikely(!(sock = sk->sk_socket)))
- goto out;
- clear_bit(SOCK_NOSPACE, &sock->flags);
-
- if (unlikely(!(xprt = xprt_from_sock(sk))))
- goto out;
- if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
- goto out;
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ xs_write_space(sk);
- xprt_write_space(xprt);
- }
-
- out:
read_unlock(&sk->sk_callback_lock);
}
@@ -1523,7 +1522,7 @@ static void xs_udp_connect_worker4(struct work_struct *work)
struct socket *sock = transport->sock;
int err, status = -EIO;
- if (xprt->shutdown || !xprt_bound(xprt))
+ if (xprt->shutdown)
goto out;
/* Start by resetting any existing state */
@@ -1564,7 +1563,7 @@ static void xs_udp_connect_worker6(struct work_struct *work)
struct socket *sock = transport->sock;
int err, status = -EIO;
- if (xprt->shutdown || !xprt_bound(xprt))
+ if (xprt->shutdown)
goto out;
/* Start by resetting any existing state */
@@ -1648,6 +1647,9 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
write_unlock_bh(&sk->sk_callback_lock);
}
+ if (!xprt_bound(xprt))
+ return -ENOTCONN;
+
/* Tell the socket layer to start connecting... */
xprt->stat.connect_count++;
xprt->stat.connect_start = jiffies;
@@ -1668,7 +1670,7 @@ static void xs_tcp_connect_worker4(struct work_struct *work)
struct socket *sock = transport->sock;
int err, status = -EIO;
- if (xprt->shutdown || !xprt_bound(xprt))
+ if (xprt->shutdown)
goto out;
if (!sock) {
@@ -1728,7 +1730,7 @@ static void xs_tcp_connect_worker6(struct work_struct *work)
struct socket *sock = transport->sock;
int err, status = -EIO;
- if (xprt->shutdown || !xprt_bound(xprt))
+ if (xprt->shutdown)
goto out;
if (!sock) {
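[Editor's note on the xprtsock.c changes above: the bound check moves out of the connect workers and into the send path and xs_tcp_finish_connecting(), while xs_sendpages() now reports a missing socket as -ENOTSOCK, which the UDP and TCP send paths translate to -ENOTCONN before it reaches the generic RPC code, so the upper layer reacts by (re)connecting rather than by failing the call. The sketch below shows only that error-mapping idea in plain user-space C; it is not the kernel's code and the names are illustrative.]

	#include <errno.h>
	#include <stdio.h>

	static int sendpages(void *sock)
	{
		if (!sock)
			return -ENOTSOCK;	/* transport-internal condition */
		return 0;			/* pretend the send succeeded */
	}

	static int send_request(void *sock)
	{
		int status = sendpages(sock);

		switch (status) {
		case -ENOTSOCK:
			/* Surface a connection-level error to the upper layer. */
			status = -ENOTCONN;
			break;
		default:
			break;
		}
		return status;
	}

	int main(void)
	{
		printf("no socket   -> %d (-ENOTCONN is %d)\n",
		       send_request(NULL), -ENOTCONN);
		printf("have socket -> %d\n", send_request((void *)1));
		return 0;
	}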