commit 0041cd5a50
When preallocated service calls are being discarded, they're passed to
->discard_new_call() to have the caller clean up any attached higher-layer
preallocated pieces before being marked completed. However, the act of
marking them completed now invokes the call's notification function - which
causes a problem because that function might assume that the previously
freed pieces of memory are still there.
Fix this by setting a dummy notification function on the call after
calling ->discard_new_call().
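
The heart of the change, excerpted from rxrpc_discard_prealloc() in the
file below:

	rx->discard_new_call(call, call->user_call_ID);
	if (call->notify_rx)
		call->notify_rx = rxrpc_dummy_notify;
	rxrpc_put_call(call, rxrpc_call_put_kernel);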
Without the fix, the following KASAN report is produced when the kafs
module is removed:
==================================================================
BUG: KASAN: use-after-free in afs_wake_up_async_call+0x6aa/0x770 fs/afs/rxrpc.c:707
Write of size 1 at addr ffff8880946c39e4 by task kworker/u4:1/21
CPU: 0 PID: 21 Comm: kworker/u4:1 Not tainted 5.8.0-rc1-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
Workqueue: netns cleanup_net
Call Trace:
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x18f/0x20d lib/dump_stack.c:118
print_address_description.constprop.0.cold+0xd3/0x413 mm/kasan/report.c:383
__kasan_report mm/kasan/report.c:513 [inline]
kasan_report.cold+0x1f/0x37 mm/kasan/report.c:530
afs_wake_up_async_call+0x6aa/0x770 fs/afs/rxrpc.c:707
rxrpc_notify_socket+0x1db/0x5d0 net/rxrpc/recvmsg.c:40
__rxrpc_set_call_completion.part.0+0x172/0x410 net/rxrpc/recvmsg.c:76
__rxrpc_call_completed net/rxrpc/recvmsg.c:112 [inline]
rxrpc_call_completed+0xca/0xf0 net/rxrpc/recvmsg.c:111
rxrpc_discard_prealloc+0x781/0xab0 net/rxrpc/call_accept.c:233
rxrpc_listen+0x147/0x360 net/rxrpc/af_rxrpc.c:245
afs_close_socket+0x95/0x320 fs/afs/rxrpc.c:110
afs_net_exit+0x1bc/0x310 fs/afs/main.c:155
ops_exit_list.isra.0+0xa8/0x150 net/core/net_namespace.c:186
cleanup_net+0x511/0xa50 net/core/net_namespace.c:603
process_one_work+0x965/0x1690 kernel/workqueue.c:2269
worker_thread+0x96/0xe10 kernel/workqueue.c:2415
kthread+0x3b5/0x4a0 kernel/kthread.c:291
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:293
Allocated by task 6820:
save_stack+0x1b/0x40 mm/kasan/common.c:48
set_track mm/kasan/common.c:56 [inline]
__kasan_kmalloc mm/kasan/common.c:494 [inline]
__kasan_kmalloc.constprop.0+0xbf/0xd0 mm/kasan/common.c:467
kmem_cache_alloc_trace+0x153/0x7d0 mm/slab.c:3551
kmalloc include/linux/slab.h:555 [inline]
kzalloc include/linux/slab.h:669 [inline]
afs_alloc_call+0x55/0x630 fs/afs/rxrpc.c:141
afs_charge_preallocation+0xe9/0x2d0 fs/afs/rxrpc.c:757
afs_open_socket+0x292/0x360 fs/afs/rxrpc.c:92
afs_net_init+0xa6c/0xe30 fs/afs/main.c:125
ops_init+0xaf/0x420 net/core/net_namespace.c:151
setup_net+0x2de/0x860 net/core/net_namespace.c:341
copy_net_ns+0x293/0x590 net/core/net_namespace.c:482
create_new_namespaces+0x3fb/0xb30 kernel/nsproxy.c:110
unshare_nsproxy_namespaces+0xbd/0x1f0 kernel/nsproxy.c:231
ksys_unshare+0x43d/0x8e0 kernel/fork.c:2983
__do_sys_unshare kernel/fork.c:3051 [inline]
__se_sys_unshare kernel/fork.c:3049 [inline]
__x64_sys_unshare+0x2d/0x40 kernel/fork.c:3049
do_syscall_64+0x60/0xe0 arch/x86/entry/common.c:359
entry_SYSCALL_64_after_hwframe+0x44/0xa9
Freed by task 21:
save_stack+0x1b/0x40 mm/kasan/common.c:48
set_track mm/kasan/common.c:56 [inline]
kasan_set_free_info mm/kasan/common.c:316 [inline]
__kasan_slab_free+0xf7/0x140 mm/kasan/common.c:455
__cache_free mm/slab.c:3426 [inline]
kfree+0x109/0x2b0 mm/slab.c:3757
afs_put_call+0x585/0xa40 fs/afs/rxrpc.c:190
rxrpc_discard_prealloc+0x764/0xab0 net/rxrpc/call_accept.c:230
rxrpc_listen+0x147/0x360 net/rxrpc/af_rxrpc.c:245
afs_close_socket+0x95/0x320 fs/afs/rxrpc.c:110
afs_net_exit+0x1bc/0x310 fs/afs/main.c:155
ops_exit_list.isra.0+0xa8/0x150 net/core/net_namespace.c:186
cleanup_net+0x511/0xa50 net/core/net_namespace.c:603
process_one_work+0x965/0x1690 kernel/workqueue.c:2269
worker_thread+0x96/0xe10 kernel/workqueue.c:2415
kthread+0x3b5/0x4a0 kernel/kthread.c:291
ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:293
The buggy address belongs to the object at ffff8880946c3800
which belongs to the cache kmalloc-1k of size 1024
The buggy address is located 484 bytes inside of
1024-byte region [ffff8880946c3800, ffff8880946c3c00)
The buggy address belongs to the page:
page:ffffea000251b0c0 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0
flags: 0xfffe0000000200(slab)
raw: 00fffe0000000200 ffffea0002546508 ffffea00024fa248 ffff8880aa000c40
raw: 0000000000000000 ffff8880946c3000 0000000100000002 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8880946c3880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880946c3900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff8880946c3980: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8880946c3a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8880946c3a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
Reported-by: syzbot+d3eccef36ddbd02713e9@syzkaller.appspotmail.com
Fixes: 5ac0d62226 ("rxrpc: Fix missing notification")
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
// SPDX-License-Identifier: GPL-2.0-or-later
/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

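/*
 * A dummy notification handler.  It's swapped onto calls being discarded
 * from the preallocation ring so that marking them complete doesn't call
 * back into higher-layer state that ->discard_new_call() has already
 * released.
 */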
static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
			       unsigned long user_call_ID)
{
}

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
	max = tmp + 1;

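	/* Top up the peer and conn rings so that they can cover the call
	 * we're about to preallocate (hence the "+ 1" above).
	 */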
	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

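	/* Publish the new call to the ring; the store-release pairs with
	 * the smp_load_acquire() of call_backlog_head in
	 * rxrpc_alloc_incoming_call().
	 */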
	b->call_backlog[call_head] = call;
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_var(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
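			/* The call's notification function may refer to
			 * state that ->discard_new_call() has just freed,
			 * so install the dummy handler before completion
			 * can notify anyone.
			 */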
			if (call->notify_rx)
				call->notify_rx = rxrpc_dummy_notify;
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_count < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_peer *peer,
						    struct rxrpc_connection *conn,
						    const struct rxrpc_security *sec,
						    struct key *key,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		if (peer && !rxrpc_get_peer_maybe(peer))
			peer = NULL;
		if (!peer) {
			peer = b->peer_backlog[peer_tail];
			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
				return NULL;
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));

			rxrpc_new_incoming_peer(rx, local, peer);
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->security = conn->security;
	call->peer = rxrpc_get_peer(conn->params.peer);
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_sock *rx,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	const struct rxrpc_security *sec = NULL;
	struct rxrpc_connection *conn;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_call *call = NULL;
	struct key *key = NULL;

	_enter("");

	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		goto no_call;
	}

	/* The peer, connection and call may all have sprung into existence due
	 * to a duplicate packet being handled on another CPU in parallel, so
	 * we have to recheck the routing.  However, we're now holding
	 * rx->incoming_lock, so the values should remain stable.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);

	if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
		goto no_call;

	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
	key_put(key);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
		goto no_call;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			if (rx->discard_new_call)
				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			else
				call->state = RXRPC_CALL_SERVER_ACCEPTING;
		}
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->abort_code, conn->error);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->abort_code, conn->error);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);
	spin_unlock(&rx->incoming_lock);

	rxrpc_send_ping(call, skb);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
	return call;

no_call:
	spin_unlock(&rx->incoming_lock);
	_leave(" = NULL [%u]", skb->mark);
	return NULL;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
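
For context, a kernel service keeps this backlog charged by calling
rxrpc_kernel_charge_accept() once per preallocated call, which is what
afs_charge_preallocation() in the KASAN trace above is doing.  The
following is a minimal sketch of that pattern, not kafs's actual code:
every my_* name is hypothetical; only rxrpc_kernel_charge_accept() and
the shapes of rxrpc_notify_rx_t and rxrpc_user_attach_call_t are taken
from the file above.

#include <linux/slab.h>
#include <net/af_rxrpc.h>

/* Hypothetical service-side record tagged onto each preallocated call. */
struct my_call {
	unsigned int debug_id;
};

/* Matches rxrpc_notify_rx_t: called on call events.  After the fix above,
 * discarded calls get rxrpc_dummy_notify() instead, so this is never run
 * against freed service state.
 */
static void my_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
			 unsigned long user_call_ID)
{
	/* Queue the service's work item to process the call. */
}

/* Matches rxrpc_user_attach_call_t: bind the service's record to the
 * preallocated rxrpc call under rx->call_lock.
 */
static void my_attach_call(struct rxrpc_call *rxcall,
			   unsigned long user_call_ID)
{
	/* Record rxcall in the my_call identified by user_call_ID. */
}

/* Top up the listen backlog until rxrpc reports it full (-ENOBUFS). */
static void my_charge_preallocation(struct socket *rxrpc_socket)
{
	struct my_call *call;

	for (;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (!call)
			break;
		if (rxrpc_kernel_charge_accept(rxrpc_socket,
					       my_notify_rx,
					       my_attach_call,
					       (unsigned long)call,
					       GFP_KERNEL,
					       call->debug_id) < 0) {
			kfree(call);
			break;
		}
	}
}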