dect
/
linux-2.6
Archived
13
0
Fork 0

Phonet: allocate sock from accept syscall rather than soft IRQ

This moves most of the accept logic to process context like other
socket stacks do. Then we can use a few more common socket helpers
and simplify a bit.

Signed-off-by: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Rémi Denis-Courmont 2011-03-08 22:44:10 +00:00 committed by David S. Miller
parent 44c9ab16d2
commit f7ae8d59f6
3 changed files with 127 additions and 180 deletions

View File

@ -28,7 +28,6 @@ struct pep_sock {
/* XXX: union-ify listening vs connected stuff ? */
/* Listening socket stuff: */
struct hlist_head ackq;
struct hlist_head hlist;
/* Connected socket stuff: */

View File

@ -42,7 +42,7 @@
* TCP_ESTABLISHED connected pipe in enabled state
*
* pep_sock locking:
* - sk_state, ackq, hlist: sock lock needed
* - sk_state, hlist: sock lock needed
* - listener: read only
* - pipe_handle: read only
*/
@ -202,11 +202,12 @@ static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
GFP_KERNEL);
}
static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code)
static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
gfp_t priority)
{
static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
WARN_ON(code == PN_PIPE_NO_ERROR);
return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC);
return pep_reply(sk, skb, code, data, sizeof(data), priority);
}
/* Control requests are not sent by the pipe service and have a specific
@ -365,7 +366,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
switch (hdr->message_id) {
case PNS_PEP_CONNECT_REQ:
pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
break;
case PNS_PEP_DISCONNECT_REQ:
@ -574,7 +575,6 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_state = TCP_SYN_RECV;
sk->sk_backlog_rcv = pipe_do_rcv;
sk->sk_destruct = pipe_destruct;
pn->rx_credits = 0;
sk->sk_state_change(sk);
@ -582,96 +582,6 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
}
#endif
/* Handle an incoming PNS_PEP_CONNECT_REQ on a listening pipe socket:
 * parse the connect request, then allocate and queue a to-be-accepted
 * child socket on the listener's ackq.
 * Returns 0 on success or a negative errno.
 * NOTE(review): runs in softirq context (GFP_ATOMIC allocations below),
 * which is exactly what the enclosing commit moves to process context.
 */
static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sock *newsk;
struct pep_sock *newpn, *pn = pep_sk(sk);
struct pnpipehdr *hdr;
struct sockaddr_pn dst, src;
u16 peer_type;
u8 pipe_handle, enabled, n_sb;
u8 aligned = 0;
/* Need the pipe header plus 4 bytes of fixed connect-request fields. */
if (!pskb_pull(skb, sizeof(*hdr) + 4))
return -EINVAL;
hdr = pnp_hdr(skb);
pipe_handle = hdr->pipe_handle;
/* The peer tells us whether the pipe starts enabled or disabled. */
switch (hdr->state_after_connect) {
case PN_PIPE_DISABLE:
enabled = 0;
break;
case PN_PIPE_ENABLE:
enabled = 1;
break;
default:
/* Unknown requested state: reject the connection attempt. */
pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
return -EINVAL;
}
/* High byte of the peer type; low byte may come from a sub-block. */
peer_type = hdr->other_pep_type << 8;
/* Parse sub-blocks (options) */
n_sb = hdr->data[4];
while (n_sb > 0) {
u8 type, buf[1], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
if (data == NULL)
return -EINVAL;
switch (type) {
case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
if (len < 1)
return -EINVAL;
/* Fill in the low byte of the peer type. */
peer_type = (peer_type & 0xff00) | data[0];
break;
case PN_PIPE_SB_ALIGNED_DATA:
aligned = data[0] != 0;
break;
}
n_sb--;
}
/* Keep a private copy of the request so it can be re-read at accept()
 * time from the child's receive queue. */
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
/* Create a new to-be-accepted sock */
newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
if (!newsk) {
kfree_skb(skb);
return -ENOMEM;
}
sock_init_data(NULL, newsk);
newsk->sk_state = TCP_SYN_RECV;
newsk->sk_backlog_rcv = pipe_do_rcv;
newsk->sk_protocol = sk->sk_protocol;
newsk->sk_destruct = pipe_destruct;
newpn = pep_sk(newsk);
/* The child talks back to the request's source, as the request's
 * destination. */
pn_skb_get_dst_sockaddr(skb, &dst);
pn_skb_get_src_sockaddr(skb, &src);
newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
skb_queue_head_init(&newpn->ctrlreq_queue);
newpn->pipe_handle = pipe_handle;
/* No credit to transmit until the peer grants some. */
atomic_set(&newpn->tx_credits, 0);
newpn->peer_type = peer_type;
newpn->rx_credits = 0;
newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
newpn->init_enable = enabled;
newpn->aligned = aligned;
/* Park the cloned request on the fresh child socket so accept() can
 * finish the handshake later. */
BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
skb_queue_head(&newsk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, 0);
/* Account the pending connection and make it findable on ackq. */
sk_acceptq_added(sk);
sk_add_node(newsk, &pn->ackq);
return 0;
}
/* Listening sock must be locked */
static struct sock *pep_find_pipe(const struct hlist_head *hlist,
const struct sockaddr_pn *dst,
@ -726,22 +636,18 @@ static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sknode)
return sk_receive_skb(sknode, skb, 1);
/* Look for a pipe handle pending accept */
sknode = pep_find_pipe(&pn->ackq, &dst, pipe_handle);
if (sknode) {
sock_put(sknode);
if (net_ratelimit())
printk(KERN_WARNING"Phonet unconnected PEP ignored");
goto drop;
}
switch (hdr->message_id) {
case PNS_PEP_CONNECT_REQ:
if (sk->sk_state == TCP_LISTEN && !sk_acceptq_is_full(sk))
pep_connreq_rcv(sk, skb);
else
pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
break;
if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
GFP_ATOMIC);
break;
}
skb_queue_head(&sk->sk_receive_queue, skb);
sk_acceptq_added(sk);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, 0);
return NET_RX_SUCCESS;
#ifdef CONFIG_PHONET_PIPECTRLR
case PNS_PEP_CONNECT_RESP:
@ -799,24 +705,16 @@ static void pep_sock_close(struct sock *sk, long timeout)
sk_common_release(sk);
lock_sock(sk);
if (sk->sk_state == TCP_LISTEN) {
/* Destroy the listen queue */
struct sock *sknode;
struct hlist_node *p, *n;
sk_for_each_safe(sknode, p, n, &pn->ackq)
sk_del_node_init(sknode);
sk->sk_state = TCP_CLOSE;
} else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
#ifndef CONFIG_PHONET_PIPECTRLR
/* Forcefully remove dangling Phonet pipe */
pipe_do_remove(sk);
#else
/* send pep disconnect request */
pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD, NULL, 0);
sk->sk_state = TCP_CLOSE;
#endif
}
sk->sk_state = TCP_CLOSE;
ifindex = pn->ifindex;
pn->ifindex = 0;
@ -827,69 +725,121 @@ static void pep_sock_close(struct sock *sk, long timeout)
sock_put(sk);
}
/* Sleep on the listening socket until a pending connection appears on
 * pn->ackq, the receive timeout elapses, or a signal arrives.
 * Must be called with the socket lock held; the lock is dropped while
 * sleeping and re-acquired before returning.
 * Returns 0 when a connection is pending, -EINVAL if the socket left the
 * LISTEN state, -EWOULDBLOCK on non-blocking timeout expiry, or a
 * signal-derived errno (sock_intr_errno).
 */
static int pep_wait_connreq(struct sock *sk, int noblock)
{
struct task_struct *tsk = current;
struct pep_sock *pn = pep_sk(sk);
long timeo = sock_rcvtimeo(sk, noblock);
for (;;) {
DEFINE_WAIT(wait);
/* The state can change while we slept without the lock. */
if (sk->sk_state != TCP_LISTEN)
return -EINVAL;
if (!hlist_empty(&pn->ackq))
break;
if (!timeo)
return -EWOULDBLOCK;
if (signal_pending(tsk))
return sock_intr_errno(timeo);
/* Exclusive wait: wake only one accepting task per connection. */
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
/* Drop the sock lock while sleeping so softirq delivery can
 * queue new connection requests. */
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
finish_wait(sk_sleep(sk), &wait);
}
return 0;
}
static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
{
struct pep_sock *pn = pep_sk(sk);
struct pep_sock *pn = pep_sk(sk), *newpn;
struct sock *newsk = NULL;
struct sk_buff *oskb;
struct sk_buff *skb;
struct pnpipehdr *hdr;
struct sockaddr_pn dst, src;
int err;
u16 peer_type;
u8 pipe_handle, enabled, n_sb;
u8 aligned = 0;
skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
if (!skb)
return NULL;
lock_sock(sk);
err = pep_wait_connreq(sk, flags & O_NONBLOCK);
if (err)
goto out;
newsk = __sk_head(&pn->ackq);
oskb = skb_dequeue(&newsk->sk_receive_queue);
err = pep_accept_conn(newsk, oskb);
if (err) {
skb_queue_head(&newsk->sk_receive_queue, oskb);
newsk = NULL;
goto out;
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto drop;
}
kfree_skb(oskb);
sock_hold(sk);
pep_sk(newsk)->listener = sk;
sock_hold(newsk);
sk_del_node_init(newsk);
sk_acceptq_removed(sk);
sk_add_node(newsk, &pn->hlist);
__sock_put(newsk);
out:
err = -EPROTO;
if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
goto drop;
hdr = pnp_hdr(skb);
pipe_handle = hdr->pipe_handle;
switch (hdr->state_after_connect) {
case PN_PIPE_DISABLE:
enabled = 0;
break;
case PN_PIPE_ENABLE:
enabled = 1;
break;
default:
pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
GFP_KERNEL);
goto drop;
}
peer_type = hdr->other_pep_type << 8;
/* Parse sub-blocks (options) */
n_sb = hdr->data[4];
while (n_sb > 0) {
u8 type, buf[1], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
if (data == NULL)
goto drop;
switch (type) {
case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
if (len < 1)
goto drop;
peer_type = (peer_type & 0xff00) | data[0];
break;
case PN_PIPE_SB_ALIGNED_DATA:
aligned = data[0] != 0;
break;
}
n_sb--;
}
/* Check for duplicate pipe handle */
newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
if (unlikely(newsk)) {
__sock_put(newsk);
newsk = NULL;
pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
goto drop;
}
/* Create a new to-be-accepted sock */
newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
if (!newsk) {
pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
err = -ENOBUFS;
goto drop;
}
sock_init_data(NULL, newsk);
newsk->sk_state = TCP_SYN_RECV;
newsk->sk_backlog_rcv = pipe_do_rcv;
newsk->sk_protocol = sk->sk_protocol;
newsk->sk_destruct = pipe_destruct;
newpn = pep_sk(newsk);
pn_skb_get_dst_sockaddr(skb, &dst);
pn_skb_get_src_sockaddr(skb, &src);
newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
sock_hold(sk);
newpn->listener = sk;
skb_queue_head_init(&newpn->ctrlreq_queue);
newpn->pipe_handle = pipe_handle;
atomic_set(&newpn->tx_credits, 0);
newpn->ifindex = 0;
newpn->peer_type = peer_type;
newpn->rx_credits = 0;
newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
newpn->init_enable = enabled;
newpn->aligned = aligned;
err = pep_accept_conn(newsk, skb);
if (err) {
sock_put(newsk);
newsk = NULL;
goto drop;
}
sk_add_node(newsk, &pn->hlist);
drop:
release_sock(sk);
kfree_skb(skb);
*errp = err;
return newsk;
}
@ -937,7 +887,7 @@ static int pep_init(struct sock *sk)
{
struct pep_sock *pn = pep_sk(sk);
INIT_HLIST_HEAD(&pn->ackq);
sk->sk_destruct = pipe_destruct;
INIT_HLIST_HEAD(&pn->hlist);
skb_queue_head_init(&pn->ctrlreq_queue);
pn->pipe_handle = PN_PIPE_INVALID_HANDLE;

View File

@ -327,6 +327,9 @@ static int pn_socket_accept(struct socket *sock, struct socket *newsock,
struct sock *newsk;
int err;
if (unlikely(sk->sk_state != TCP_LISTEN))
return -EINVAL;
newsk = sk->sk_prot->accept(sk, flags, &err);
if (!newsk)
return err;
@ -363,13 +366,8 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
poll_wait(file, sk_sleep(sk), wait);
switch (sk->sk_state) {
case TCP_LISTEN:
return hlist_empty(&pn->ackq) ? 0 : POLLIN;
case TCP_CLOSE:
if (sk->sk_state == TCP_CLOSE)
return POLLERR;
}
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
if (!skb_queue_empty(&pn->ctrlreq_queue))