// SPDX-License-Identifier: GPL-2.0
/*
 * n_gsm.c GSM 0710 tty multiplexor
 * Copyright (c) 2009/10 Intel Corporation
 * Copyright (c) 2022/23 Siemens Mobility GmbH
 *
 *	* THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
 *
 * Outgoing path:
 * tty -> DLCI fifo -> scheduler -> GSM MUX data queue    ---o-> ldisc
 * control message               -> GSM MUX control queue --´
 *
 * Incoming path:
 * ldisc -> gsm_queue() -o--> tty
 *                        `-> gsm_control_response()
 *
 * TO DO:
 *	Mostly done:	ioctls for setting modes/timing
 *	Partly done:	hooks so you can pull off frames to non tty devs
 *	Restart DLCI 0 when it closes ?
 *	Improve the tx engine
 *	Resolve tx side locking by adding a queue_head and routing
 *		all control traffic via it
 *	General tidy/document
 *	Review the locking/move to refcounts more (mux now moved to an
 *		alloc/free model ready)
 *	Use newest tty open/close port helpers and install hooks
 *	What to do about power functions ?
 *	Termios setting and negotiation
 *	Do we need a 'which mux are you' ioctl to correlate mux and tty sets
 *
 */

#include <linux/types.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/fcntl.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/math.h>
#include <linux/nospec.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/bitops.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/serial.h>
#include <linux/kfifo.h>
#include <linux/skbuff.h>
#include <net/arp.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/gsmmux.h>
#include "tty.h"

static int debug;
module_param(debug, int, 0600);

/* Module debug bits */
#define DBG_DUMP	BIT(0) /* Data transmission dump. */
#define DBG_CD_ON	BIT(1) /* Always assume CD line on. */
#define DBG_DATA	BIT(2) /* Data transmission details. */
#define DBG_ERRORS	BIT(3) /* Details for fail conditions. */
#define DBG_TTY		BIT(4) /* Transmission statistics for DLCI TTYs. */
#define DBG_PAYLOAD	BIT(5) /* Limits DBG_DUMP to payload frames. */

/* Defaults: these are from the specification */

#define T1	10		/* 100mS */
#define T2	34		/* 333mS */
#define T3	10		/* 10s */
#define N2	3		/* Retry 3 times */
#define K	2		/* outstanding I frames */

#define MAX_T3		255	/* In seconds. */
#define MAX_WINDOW_SIZE	7	/* Limit of K in error recovery mode. */
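
/*
 * Note on units: T1 and T2 above are counted in hundredths of a second
 * (matching the "Timers in 1/100th of a sec" fields in struct gsm_mux
 * below), while T3 and MAX_T3 are in seconds.
 */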

/* Use long timers for testing at low speed with debug on */
#ifdef DEBUG_TIMING
#define T1	100
#define T2	200
#endif

/*
 * Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte
 * limits so this is plenty
 */
#define MAX_MRU 1500
#define MAX_MTU 1500
#define MIN_MTU (PROT_OVERHEAD + 1)
/* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */
#define PROT_OVERHEAD 7
#define GSM_NET_TX_TIMEOUT (HZ*10)

/*
 * struct gsm_mux_net - network interface
 *
 * Created when net interface is initialized.
 */
struct gsm_mux_net {
	struct kref ref;
	struct gsm_dlci *dlci;
};

/*
 * Each block of data we have queued to go out is in the form of
 * a gsm_msg which holds everything we need in a link layer independent
 * format
 */

struct gsm_msg {
	struct list_head list;
	u8 addr;		/* DLCI address + flags */
	u8 ctrl;		/* Control byte + flags */
	unsigned int len;	/* Length of data block (can be zero) */
	u8 *data;		/* Points into buffer but not at the start */
	u8 buffer[];
};

enum gsm_dlci_state {
	DLCI_CLOSED,
	DLCI_WAITING_CONFIG,	/* Waiting for DLCI configuration from user */
	DLCI_CONFIGURE,		/* Sending PN (for adaption > 1) */
	DLCI_OPENING,		/* Sending SABM not seen UA */
	DLCI_OPEN,		/* SABM/UA complete */
	DLCI_CLOSING,		/* Sending DISC not seen UA/DM */
};

enum gsm_dlci_mode {
	DLCI_MODE_ABM,		/* Normal Asynchronous Balanced Mode */
	DLCI_MODE_ADM,		/* Asynchronous Disconnected Mode */
};

/*
 * Each active data link has a gsm_dlci structure associated which ties
 * the link layer to an optional tty (if the tty side is open). To avoid
 * complexity right now these are only ever freed up when the mux is
 * shut down.
 *
 * At the moment we don't free DLCI objects until the mux is torn down.
 * This avoids object lifetime issues but might be worth reviewing later.
 */

struct gsm_dlci {
	struct gsm_mux *gsm;
	int addr;
	enum gsm_dlci_state state;
	struct mutex mutex;

	/* Link layer */
	enum gsm_dlci_mode mode;
	spinlock_t lock;	/* Protects the internal state */
	struct timer_list t1;	/* Retransmit timer for SABM and UA */
	int retries;
	/* Uplink tty if active */
	struct tty_port port;	/* The tty bound to this DLCI if there is one */
#define TX_SIZE		4096	/* Must be power of 2. */
	struct kfifo fifo;	/* Queue fifo for the DLCI */
	int adaption;		/* Adaption layer in use */
	int prev_adaption;
	u32 modem_rx;		/* Our incoming virtual modem lines */
	u32 modem_tx;		/* Our outgoing modem lines */
	unsigned int mtu;
	bool dead;		/* Refuse re-open */
	/* Configuration */
	u8 prio;		/* Priority */
	u8 ftype;		/* Frame type */
	u8 k;			/* Window size */
	/* Flow control */
	bool throttled;		/* Private copy of throttle state */
	bool constipated;	/* Throttle status for outgoing */
	/* Packetised I/O */
	struct sk_buff *skb;	/* Frame being sent */
	struct sk_buff_head skb_list;	/* Queued frames */
	/* Data handling callback */
	void (*data)(struct gsm_dlci *dlci, const u8 *data, int len);
	void (*prev_data)(struct gsm_dlci *dlci, const u8 *data, int len);
	struct net_device *net;	/* network interface, if created */
};

/*
 * Parameter bits used for parameter negotiation according to 3GPP 27.010
 * chapter 5.4.6.3.1.
 */

struct gsm_dlci_param_bits {
	u8 d_bits;
	u8 i_cl_bits;
	u8 p_bits;
	u8 t_bits;
	__le16 n_bits;
	u8 na_bits;
	u8 k_bits;
};

static_assert(sizeof(struct gsm_dlci_param_bits) == 8);

#define PN_D_FIELD_DLCI		GENMASK(5, 0)
#define PN_I_CL_FIELD_FTYPE	GENMASK(3, 0)
#define PN_I_CL_FIELD_ADAPTION	GENMASK(7, 4)
#define PN_P_FIELD_PRIO		GENMASK(5, 0)
#define PN_T_FIELD_T1		GENMASK(7, 0)
#define PN_N_FIELD_N1		GENMASK(15, 0)
#define PN_NA_FIELD_N2		GENMASK(7, 0)
#define PN_K_FIELD_K		GENMASK(2, 0)

/* Total number of supported devices */
#define GSM_TTY_MINORS		256

/* DLCI 0, 62/63 are special or reserved see gsmtty_open */

#define NUM_DLCI		64

/*
 * DLCI 0 is used to pass control blocks out of band of the data
 * flow (and with a higher link priority). One command can be outstanding
 * at a time and we use this structure to manage them. They are created
 * and destroyed by the user context, and updated by the receive paths
 * and timers
 */

struct gsm_control {
	u8 cmd;		/* Command we are issuing */
	u8 *data;	/* Data for the command in case we retransmit */
	int len;	/* Length of block for retransmission */
	int done;	/* Done flag */
	int error;	/* Error if any */
};

enum gsm_encoding {
	GSM_BASIC_OPT,
	GSM_ADV_OPT,
};

enum gsm_mux_state {
	GSM_SEARCH,
	GSM0_ADDRESS,
	GSM0_CONTROL,
	GSM0_LEN0,
	GSM0_LEN1,
	GSM0_DATA,
	GSM0_FCS,
	GSM0_SSOF,
	GSM1_START,
	GSM1_ADDRESS,
	GSM1_CONTROL,
	GSM1_DATA,
	GSM1_OVERRUN,
};

/*
 * Each GSM mux we have is represented by this structure. If we are
 * operating as an ldisc then we use this structure as our ldisc
 * state. We need to sort out lifetimes and locking with respect
 * to the gsm mux array. For now we don't free DLCI objects that
 * have been instantiated until the mux itself is terminated.
 *
 * To consider further: tty open versus mux shutdown.
 */

struct gsm_mux {
	struct tty_struct *tty;		/* The tty our ldisc is bound to */
	spinlock_t lock;
	struct mutex mutex;
	unsigned int num;
	struct kref ref;

	/* Events on the GSM channel */
	wait_queue_head_t event;

	/* ldisc send work */
	struct work_struct tx_work;

	/* Bits for GSM mode decoding */

	/* Framing Layer */
	u8 *buf;
	enum gsm_mux_state state;
	unsigned int len;
	unsigned int address;
	unsigned int count;
	bool escape;
	enum gsm_encoding encoding;
	u8 control;
	u8 fcs;
	u8 *txframe;			/* TX framing buffer */

	/* Method for the receiver side */
	void (*receive)(struct gsm_mux *gsm, u8 ch);

	/* Link Layer */
	unsigned int mru;
	unsigned int mtu;
	int initiator;			/* Did we initiate connection */
	bool dead;			/* Has the mux been shut down */
	struct gsm_dlci *dlci[NUM_DLCI];
	int old_c_iflag;		/* termios c_iflag value before attach */
	bool constipated;		/* Asked by remote to shut up */
	bool has_devices;		/* Devices were registered */

	spinlock_t tx_lock;
	unsigned int tx_bytes;		/* TX data outstanding */
#define TX_THRESH_HI		8192
#define TX_THRESH_LO		2048
	struct list_head tx_ctrl_list;	/* Pending control packets */
	struct list_head tx_data_list;	/* Pending data packets */

	/* Control messages */
	struct timer_list kick_timer;	/* Kick TX queuing on timeout */
	struct timer_list t2_timer;	/* Retransmit timer for commands */
	int cretries;			/* Command retry counter */
	struct gsm_control *pending_cmd;/* Our current pending command */
	spinlock_t control_lock;	/* Protects the pending command */

	/* Keep-alive */
	struct timer_list ka_timer;	/* Keep-alive response timer */
	u8 ka_num;			/* Keep-alive match pattern */
	signed int ka_retries;		/* Keep-alive retry counter, -1 if not yet initialized */

	/* Configuration */
	int adaption;		/* 1 or 2 supported */
	u8 ftype;		/* UI or UIH */
	int t1, t2;		/* Timers in 1/100th of a sec */
	unsigned int t3;	/* Power wake-up timer in seconds. */
	int n2;			/* Retry count */
	u8 k;			/* Window size */
	bool wait_config;	/* Wait for configuration by ioctl before DLCI open */
	u32 keep_alive;		/* Control channel keep-alive in 10ms */

	/* Statistics (not currently exposed) */
	unsigned long bad_fcs;
	unsigned long malformed;
	unsigned long io_error;
	unsigned long open_error;
	unsigned long bad_size;
	unsigned long unsupported;
};


/*
 * Mux objects - needed so that we can translate a tty index into the
 * relevant mux and DLCI.
 */

#define MAX_MUX 4			/* 256 minors */
static struct gsm_mux *gsm_mux[MAX_MUX];	/* GSM muxes */
static DEFINE_SPINLOCK(gsm_mux_lock);

static struct tty_driver *gsm_tty_driver;

/*
 * This section of the driver logic implements the GSM encodings
 * both the basic and the 'advanced'. Reliable transport is not
 * supported.
 */

#define CR			0x02
#define EA			0x01
#define PF			0x10
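
/*
 * Address and control field bits: EA is the "extension" bit that marks the
 * last octet of an extensible field (see gsm_read_ea() below), CR is the
 * command/response bit carried in the address octet, and PF is the
 * poll/final bit in the control octet.
 */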

/* I is special: the rest are .. */
#define RR			0x01
#define UI			0x03
#define RNR			0x05
#define REJ			0x09
#define DM			0x0F
#define SABM			0x2F
#define DISC			0x43
#define UA			0x63
#define UIH			0xEF

/* Channel commands */
#define CMD_NSC			0x09
#define CMD_TEST		0x11
#define CMD_PSC			0x21
#define CMD_RLS			0x29
#define CMD_FCOFF		0x31
#define CMD_PN			0x41
#define CMD_RPN			0x49
#define CMD_FCON		0x51
#define CMD_CLD			0x61
#define CMD_SNC			0x69
#define CMD_MSC			0x71
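
/*
 * Expanded, the channel command names above are: NSC = Non Supported
 * Command, TEST = Test, PSC = Power Saving Control, RLS = Remote Line
 * Status, FCOFF/FCON = Flow Control Off/On, PN = DLC Parameter Negotiation,
 * RPN = Remote Port Negotiation, CLD = Multiplexer Close Down, SNC =
 * Service Negotiation Command, MSC = Modem Status Command (3GPP TS 27.010).
 */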

/* Virtual modem bits */
#define MDM_FC			0x01
#define MDM_RTC			0x02
#define MDM_RTR			0x04
#define MDM_IC			0x20
#define MDM_DV			0x40
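
/*
 * The MDM_* bits form the V.24 signal octet of the modem status command:
 * FC = Flow Control, RTC = Ready To Communicate (DTR/DSR), RTR = Ready To
 * Receive (RTS/CTS), IC = Incoming Call indicator (RI), DV = Data Valid
 * (DCD). See gsm_encode_modem() below for how they are derived.
 */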

#define GSM0_SOF		0xF9
#define GSM1_SOF		0x7E
#define GSM1_ESCAPE		0x7D
#define GSM1_ESCAPE_BITS	0x20
#define XON			0x11
#define XOFF			0x13
#define ISO_IEC_646_MASK	0x7F
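
/*
 * Framing bytes: GSM0_SOF flags a basic option frame, GSM1_SOF flags an
 * advanced option frame. In advanced option mode GSM1_ESCAPE starts a
 * transparency sequence and the following byte is XORed with
 * GSM1_ESCAPE_BITS; XON/XOFF are escaped as well (see gsm_stuff_frame()).
 */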

static const struct tty_port_operations gsm_port_ops;

/*
 * CRC table for GSM 0710
 */

static const u8 gsm_fcs8[256] = {
	0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75,
	0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B,
	0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69,
	0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67,
	0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D,
	0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43,
	0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51,
	0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F,
	0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05,
	0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B,
	0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19,
	0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17,
	0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D,
	0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33,
	0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21,
	0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F,
	0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95,
	0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B,
	0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89,
	0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87,
	0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD,
	0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3,
	0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1,
	0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF,
	0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5,
	0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB,
	0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9,
	0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7,
	0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD,
	0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3,
	0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1,
	0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF
};

#define INIT_FCS	0xFF
#define GOOD_FCS	0xCF

static void gsm_dlci_close(struct gsm_dlci *dlci);
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len);
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk);
static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
				      u8 ctrl);
static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg);
static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr);
static void gsmld_write_trigger(struct gsm_mux *gsm);
static void gsmld_write_task(struct work_struct *work);

/**
 * gsm_fcs_add - update FCS
 * @fcs: Current FCS
 * @c: Next data
 *
 * Update the FCS to include c. Uses the algorithm in the specification
 * notes.
 */

static inline u8 gsm_fcs_add(u8 fcs, u8 c)
{
	return gsm_fcs8[fcs ^ c];
}

/**
 * gsm_fcs_add_block - update FCS for a block
 * @fcs: Current FCS
 * @c: buffer of data
 * @len: length of buffer
 *
 * Update the FCS to include c. Uses the algorithm in the specification
 * notes.
 */

static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len)
{
	while (len--)
		fcs = gsm_fcs8[fcs ^ *c++];
	return fcs;
}
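
/*
 * Note: frames carry an 8 bit FCS accumulated with these helpers starting
 * from INIT_FCS. Elsewhere in this driver (not shown in this excerpt) the
 * transmitted FCS byte is the ones' complement of that running value, and a
 * receiver that also folds the received FCS byte into its running value ends
 * up with GOOD_FCS when the frame arrived intact.
 */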

/**
 * gsm_read_ea - read a byte into an EA
 * @val: variable holding value
 * @c: byte going into the EA
 *
 * Processes one byte of an EA. Updates the passed variable
 * and returns 1 if the EA is now completely read
 */

static int gsm_read_ea(unsigned int *val, u8 c)
{
	/* Add the next 7 bits into the value */
	*val <<= 7;
	*val |= c >> 1;
	/* Was this the last byte of the EA 1 = yes */
	return c & EA;
}
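
/*
 * Worked example for gsm_read_ea(): the two byte sequence 0x82 0x61 decodes
 * as ((0x82 >> 1) << 7) | (0x61 >> 1) = 0x20b0. The first byte has its EA
 * bit (bit 0) clear so more octets follow; the second has it set, which
 * terminates the field and makes gsm_read_ea() return non-zero.
 */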

/**
 * gsm_read_ea_val - read a value until EA
 * @val: variable holding value
 * @data: buffer of data
 * @dlen: length of data
 *
 * Processes an EA value. Updates the passed variable and
 * returns the processed data length.
 */
static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen)
{
	unsigned int len = 0;

	for (; dlen > 0; dlen--) {
		len++;
		if (gsm_read_ea(val, *data++))
			break;
	}
	return len;
}

/**
 * gsm_encode_modem - encode modem data bits
 * @dlci: DLCI to encode from
 *
 * Returns the correct GSM encoded modem status bits (6 bit field) for
 * the current status of the DLCI and attached tty object
 */

static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
{
	u8 modembits = 0;
	/* FC is true flow control not modem bits */
	if (dlci->throttled)
		modembits |= MDM_FC;
	if (dlci->modem_tx & TIOCM_DTR)
		modembits |= MDM_RTC;
	if (dlci->modem_tx & TIOCM_RTS)
		modembits |= MDM_RTR;
	if (dlci->modem_tx & TIOCM_RI)
		modembits |= MDM_IC;
	if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
		modembits |= MDM_DV;
	/* special mappings for passive side to operate as UE */
	if (dlci->modem_tx & TIOCM_OUT1)
		modembits |= MDM_IC;
	if (dlci->modem_tx & TIOCM_OUT2)
		modembits |= MDM_DV;
	return modembits;
}

static void gsm_hex_dump_bytes(const char *fname, const u8 *data,
			       unsigned long len)
{
	char *prefix;

	if (!fname) {
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len,
			       true);
		return;
	}

	prefix = kasprintf(GFP_ATOMIC, "%s: ", fname);
	if (!prefix)
		return;
	print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len,
		       true);
	kfree(prefix);
}

/**
 * gsm_encode_params - encode DLCI parameters
 * @dlci: DLCI to encode from
 * @params: buffer to fill with the encoded parameters
 *
 * Encodes the parameters according to GSM 07.10 section 5.4.6.3.1
 * table 3.
 */
static int gsm_encode_params(const struct gsm_dlci *dlci,
			     struct gsm_dlci_param_bits *params)
{
	const struct gsm_mux *gsm = dlci->gsm;
	unsigned int i, cl;

	switch (dlci->ftype) {
	case UIH:
		i = 0; /* UIH */
		break;
	case UI:
		i = 1; /* UI */
		break;
	default:
		pr_debug("unsupported frame type %d\n", dlci->ftype);
		return -EINVAL;
	}

	switch (dlci->adaption) {
	case 1: /* Unstructured */
		cl = 0; /* convergence layer type 1 */
		break;
	case 2: /* Unstructured with modem bits. */
		cl = 1; /* convergence layer type 2 */
		break;
	default:
		pr_debug("unsupported adaption %d\n", dlci->adaption);
		return -EINVAL;
	}

	params->d_bits = FIELD_PREP(PN_D_FIELD_DLCI, dlci->addr);
	/* UIH, convergence layer type 1 */
	params->i_cl_bits = FIELD_PREP(PN_I_CL_FIELD_FTYPE, i) |
			    FIELD_PREP(PN_I_CL_FIELD_ADAPTION, cl);
	params->p_bits = FIELD_PREP(PN_P_FIELD_PRIO, dlci->prio);
	params->t_bits = FIELD_PREP(PN_T_FIELD_T1, gsm->t1);
	params->n_bits = cpu_to_le16(FIELD_PREP(PN_N_FIELD_N1, dlci->mtu));
	params->na_bits = FIELD_PREP(PN_NA_FIELD_N2, gsm->n2);
	params->k_bits = FIELD_PREP(PN_K_FIELD_K, dlci->k);

	return 0;
}

/**
 * gsm_register_devices - register all tty devices for a given mux index
 *
 * @driver: the tty driver that describes the tty devices
 * @index:  the mux number is used to calculate the minor numbers of the
 *          ttys for this mux and may differ from the position in the
 *          mux array.
 */
static int gsm_register_devices(struct tty_driver *driver, unsigned int index)
{
	struct device *dev;
	int i;
	unsigned int base;

	if (!driver || index >= MAX_MUX)
		return -EINVAL;

	base = index * NUM_DLCI; /* first minor for this index */
	for (i = 1; i < NUM_DLCI; i++) {
		/* Don't register device 0 - this is the control channel
		 * and not a usable tty interface
		 */
		dev = tty_register_device(gsm_tty_driver, base + i, NULL);
		if (IS_ERR(dev)) {
			if (debug & DBG_ERRORS)
				pr_info("%s failed to register device minor %u",
					__func__, base + i);
			for (i--; i >= 1; i--)
				tty_unregister_device(gsm_tty_driver, base + i);
			return PTR_ERR(dev);
		}
	}

	return 0;
}

/**
 * gsm_unregister_devices - unregister all tty devices for a given mux index
 *
 * @driver: the tty driver that describes the tty devices
 * @index:  the mux number is used to calculate the minor numbers of the
 *          ttys for this mux and may differ from the position in the
 *          mux array.
 */
static void gsm_unregister_devices(struct tty_driver *driver,
				   unsigned int index)
{
	int i;
	unsigned int base;

	if (!driver || index >= MAX_MUX)
		return;

	base = index * NUM_DLCI; /* first minor for this index */
	for (i = 1; i < NUM_DLCI; i++) {
		/* Don't unregister device 0 - this is the control
		 * channel and not a usable tty interface
		 */
		tty_unregister_device(gsm_tty_driver, base + i);
	}
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
 * gsm_print_packet - display a frame for debug
 * @hdr: header to print before decode
 * @addr: address EA from the frame
 * @cr: C/R bit seen as initiator
 * @control: control including PF bit
 * @data: following data bytes
 * @dlen: length of data
 *
 * Displays a packet in human readable format for debugging purposes. The
 * style is based on amateur radio LAP-B dump display.
 */
static void gsm_print_packet(const char *hdr, int addr, int cr,
			     u8 control, const u8 *data, int dlen)
{
	if (!(debug & DBG_DUMP))
		return;
	/* Only show user payload frames if debug & DBG_PAYLOAD */
	if (!(debug & DBG_PAYLOAD) && addr != 0)
		if ((control & ~PF) == UI || (control & ~PF) == UIH)
			return;

	pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]);

	switch (control & ~PF) {
	case SABM:
		pr_cont("SABM");
		break;
	case UA:
		pr_cont("UA");
		break;
	case DISC:
		pr_cont("DISC");
		break;
	case DM:
		pr_cont("DM");
		break;
	case UI:
		pr_cont("UI");
		break;
	case UIH:
		pr_cont("UIH");
		break;
	default:
		if (!(control & 0x01)) {
			pr_cont("I N(S)%d N(R)%d",
				(control & 0x0E) >> 1, (control & 0xE0) >> 5);
		} else switch (control & 0x0F) {
		case RR:
			pr_cont("RR(%d)", (control & 0xE0) >> 5);
			break;
		case RNR:
			pr_cont("RNR(%d)", (control & 0xE0) >> 5);
			break;
		case REJ:
			pr_cont("REJ(%d)", (control & 0xE0) >> 5);
			break;
		default:
			pr_cont("[%02X]", control);
		}
	}

	if (control & PF)
		pr_cont("(P)");
	else
		pr_cont("(F)");

	gsm_hex_dump_bytes(NULL, data, dlen);
}
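
/*
 * Illustrative sample of the dump format produced above (not taken from a
 * real trace): a UIH command frame with the P bit set on DLCI 5, queued
 * for transmission, would be printed roughly as
 *
 *	Q-> 5) C: UIH(P)
 *
 * followed by a hex dump of any payload bytes.
 */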

/*
 *	Link level transmission side
 */

/**
 * gsm_stuff_frame - bytestuff a packet
 * @input: input buffer
 * @output: output buffer
 * @len: length of input
 *
 * Expand a buffer by bytestuffing it. The worst case size change
 * is doubling and the caller is responsible for handing out
 * suitable sized buffers.
 */
static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
{
	int olen = 0;
	while (len--) {
		if (*input == GSM1_SOF || *input == GSM1_ESCAPE
		    || (*input & ISO_IEC_646_MASK) == XON
		    || (*input & ISO_IEC_646_MASK) == XOFF) {
			*output++ = GSM1_ESCAPE;
			*output++ = *input++ ^ GSM1_ESCAPE_BITS;
			olen++;
		} else
			*output++ = *input++;
		olen++;
	}
	return olen;
}
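
/*
 * Stuffing example (a sketch; assumes the usual advanced option constants
 * GSM1_SOF 0x7E, GSM1_ESCAPE 0x7D and GSM1_ESCAPE_BITS 0x20): an input
 * byte of 0x7E is emitted as the pair 0x7D 0x5E, and an XON (0x11) as
 * 0x7D 0x31, so an n byte input needs at most a 2 * n byte output buffer.
 */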

/**
 * gsm_send - send a control frame
 * @gsm: our GSM mux
 * @addr: address for control frame
 * @cr: command/response bit seen as initiator
 * @control: control byte including PF bit
 *
 * Format up and transmit a control frame. These should be transmitted
 * ahead of data when they are needed.
 */
static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
{
	struct gsm_msg *msg;
	u8 *dp;
	int ocr;
	unsigned long flags;

	msg = gsm_data_alloc(gsm, addr, 0, control);
	if (!msg)
		return -ENOMEM;

	/* toggle C/R coding if not initiator */
	ocr = cr ^ (gsm->initiator ? 0 : 1);

	msg->data -= 3;
	dp = msg->data;
	*dp++ = (addr << 2) | (ocr << 1) | EA;
	*dp++ = control;

	if (gsm->encoding == GSM_BASIC_OPT)
		*dp++ = EA; /* Length of data = 0 */

	*dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data);
	msg->len = (dp - msg->data) + 1;

	gsm_print_packet("Q->", addr, cr, control, NULL, 0);

	spin_lock_irqsave(&gsm->tx_lock, flags);
	list_add_tail(&msg->list, &gsm->tx_ctrl_list);
	gsm->tx_bytes += msg->len;
	spin_unlock_irqrestore(&gsm->tx_lock, flags);
	gsmld_write_trigger(gsm);

	return 0;
}
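
/*
 * Resulting frame layout (illustrative): for the basic option the bytes
 * queued above are
 *
 *	[ (addr << 2) | C/R | EA ] [ control ] [ EA length = 0 ] [ FCS ]
 *
 * and for the advanced option the length octet is omitted; the SOF flags
 * and any byte stuffing are added later by gsm_send_packet().
 */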

/**
 * gsm_dlci_clear_queues - remove outstanding data for a DLCI
 * @gsm: mux
 * @dlci: clear for this DLCI
 *
 * Clears the data queues for a given DLCI.
 */
static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
{
	struct gsm_msg *msg, *nmsg;
	int addr = dlci->addr;
	unsigned long flags;

	/* Clear DLCI write fifo first */
	spin_lock_irqsave(&dlci->lock, flags);
	kfifo_reset(&dlci->fifo);
	spin_unlock_irqrestore(&dlci->lock, flags);

	/* Clear data packets in MUX write queue */
	spin_lock_irqsave(&gsm->tx_lock, flags);
	list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
		if (msg->addr != addr)
			continue;
		gsm->tx_bytes -= msg->len;
		list_del(&msg->list);
		kfree(msg);
	}
	spin_unlock_irqrestore(&gsm->tx_lock, flags);
}
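
/*
 * Note on the locking above (descriptive of this function, not a new rule):
 * the DLCI fifo lock and the mux tx_lock are taken one after the other
 * rather than nested, so no ordering between the two spinlocks is imposed
 * here.
 */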

/**
 * gsm_response - send a control response
 * @gsm: our GSM mux
 * @addr: address for control frame
 * @control: control byte including PF bit
 *
 * Format up and transmit a link level response frame.
 */
static inline void gsm_response(struct gsm_mux *gsm, int addr, int control)
{
	gsm_send(gsm, addr, 0, control);
}

/**
 * gsm_command - send a control command
 * @gsm: our GSM mux
 * @addr: address for control frame
 * @control: control byte including PF bit
 *
 * Format up and transmit a link level command frame.
 */
static inline void gsm_command(struct gsm_mux *gsm, int addr, int control)
{
	gsm_send(gsm, addr, 1, control);
}
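
/*
 * Usage sketch (illustrative, not lifted from a specific call site): a
 * station opening a DLCI would issue gsm_command(gsm, addr, SABM|PF) and
 * the peer acknowledges with gsm_response(gsm, addr, UA|PF); the C/R flip
 * for the non-initiating side is handled inside gsm_send().
 */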

/* Data transmission */

#define HDR_LEN		6	/* ADDR CTRL [LEN.2] DATA FCS */

/**
 * gsm_data_alloc - allocate data frame
 * @gsm: GSM mux
 * @addr: DLCI address
 * @len: length excluding header and FCS
 * @ctrl: control byte
 *
 * Allocate a new data buffer for sending frames with data. Space is left
 * at the front for header bytes but that is treated as an implementation
 * detail and not for the high level code to use.
 */
static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len,
				      u8 ctrl)
{
	struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN,
				    GFP_ATOMIC);
	if (m == NULL)
		return NULL;
	m->data = m->buffer + HDR_LEN - 1;	/* Allow for FCS */
	m->len = len;
	m->addr = addr;
	m->ctrl = ctrl;
	INIT_LIST_HEAD(&m->list);
	return m;
}
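
/*
 * Buffer layout sketch for the allocation above (an illustration of the
 * arithmetic, not a normative map): the flexible buffer holds len + HDR_LEN
 * bytes and msg->data starts HDR_LEN - 1 bytes in, so the header bytes that
 * the senders prepend by stepping msg->data backwards fit in front of the
 * payload while one byte remains behind it for the FCS.
 */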
|
|
|
|
|
|
tty: n_gsm: fix deadlock and link starvation in outgoing data path
The current implementation queues up new control and user packets as needed
and processes this queue down to the ldisc in the same code path.
That means that the upper and the lower layer are hard coupled in the code.
Due to this deadlocks can happen as seen below while transmitting data,
especially during ldisc congestion. Furthermore, the data channels starve
the control channel on high transmission load on the ldisc.
Introduce an additional control channel data queue to prevent timeouts and
link hangups during ldisc congestion. This is being processed before the
user channel data queue in gsm_data_kick(), i.e. with the highest priority.
Put the queue to ldisc data path into a workqueue and trigger it whenever
new data has been put into the transmission queue. Change
gsm_dlci_data_sweep() accordingly to fill up the transmission queue until
TX_THRESH_HI. This solves the locking issue, keeps latency low and provides
good performance on high data load.
Note that now all packets from a DLCI are removed from the internal queue
if the associated DLCI was closed. This ensures that no data is sent by the
introduced write task to an already closed DLCI.
BUG: spinlock recursion on CPU#0, test_v24_loop/124
lock: serial8250_ports+0x3a8/0x7500, .magic: dead4ead, .owner: test_v24_loop/124, .owner_cpu: 0
CPU: 0 PID: 124 Comm: test_v24_loop Tainted: G O 5.18.0-rc2 #3
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
Call Trace:
<IRQ>
dump_stack_lvl+0x34/0x44
do_raw_spin_lock+0x76/0xa0
_raw_spin_lock_irqsave+0x72/0x80
uart_write_room+0x3b/0xc0
gsm_data_kick+0x14b/0x240 [n_gsm]
gsmld_write_wakeup+0x35/0x70 [n_gsm]
tty_wakeup+0x53/0x60
tty_port_default_wakeup+0x1b/0x30
serial8250_tx_chars+0x12f/0x220
serial8250_handle_irq.part.0+0xfe/0x150
serial8250_default_handle_irq+0x48/0x80
serial8250_interrupt+0x56/0xa0
__handle_irq_event_percpu+0x78/0x1f0
handle_irq_event+0x34/0x70
handle_fasteoi_irq+0x90/0x1e0
__common_interrupt+0x69/0x100
common_interrupt+0x48/0xc0
asm_common_interrupt+0x1e/0x40
RIP: 0010:__do_softirq+0x83/0x34e
Code: 2a 0a ff 0f b7 ed c7 44 24 10 0a 00 00 00 48 c7 c7 51 2a 64 82 e8 2d
e2 d5 ff 65 66 c7 05 83 af 1e 7e 00 00 fb b8 ff ff ff ff <49> c7 c2 40 61
80 82 0f bc c5 41 89 c4 41 83 c4 01 0f 84 e6 00 00
RSP: 0018:ffffc90000003f98 EFLAGS: 00000286
RAX: 00000000ffffffff RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffffffff82642a51 RDI: ffffffff825bb5e7
RBP: 0000000000000200 R08: 00000008de3271a8 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000030 R14: 0000000000000000 R15: 0000000000000000
? __do_softirq+0x73/0x34e
irq_exit_rcu+0xb5/0x100
common_interrupt+0xa4/0xc0
</IRQ>
<TASK>
asm_common_interrupt+0x1e/0x40
RIP: 0010:_raw_spin_unlock_irqrestore+0x2e/0x50
Code: 00 55 48 89 fd 48 83 c7 18 53 48 89 f3 48 8b 74 24 10 e8 85 28 36 ff
48 89 ef e8 cd 58 36 ff 80 e7 02 74 01 fb bf 01 00 00 00 <e8> 3d 97 33 ff
65 8b 05 96 23 2b 7e 85 c0 74 03 5b 5d c3 0f 1f 44
RSP: 0018:ffffc9000020fd08 EFLAGS: 00000202
RAX: 0000000000000000 RBX: 0000000000000246 RCX: 0000000000000000
RDX: 0000000000000004 RSI: ffffffff8257fd74 RDI: 0000000000000001
RBP: ffff8880057de3a0 R08: 00000008de233000 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000100 R14: 0000000000000202 R15: ffff8880057df0b8
? _raw_spin_unlock_irqrestore+0x23/0x50
gsmtty_write+0x65/0x80 [n_gsm]
n_tty_write+0x33f/0x530
? swake_up_all+0xe0/0xe0
file_tty_write.constprop.0+0x1b1/0x320
? n_tty_flush_buffer+0xb0/0xb0
new_sync_write+0x10c/0x190
vfs_write+0x282/0x310
ksys_write+0x68/0xe0
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f3e5e35c15c
Code: 8b 7c 24 08 89 c5 e8 c5 ff ff ff 89 ef 89 44 24 08 e8 58 bc 02 00 8b
44 24 08 48 83 c4 10 5d c3 48 63 ff b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff
ff 76 10 48 8b 15 fd fc 05 00 f7 d8 64 89 02 48 83
RSP: 002b:00007ffcee77cd18 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 00007ffcee77cd70 RCX: 00007f3e5e35c15c
RDX: 0000000000000100 RSI: 00007ffcee77cd90 RDI: 0000000000000003
RBP: 0000000000000100 R08: 0000000000000000 R09: 7efefefefefefeff
R10: 00007f3e5e3bddeb R11: 0000000000000246 R12: 00007ffcee77ce8f
R13: 0000000000000001 R14: 000056214404e010 R15: 00007ffcee77cd90
</TASK>
Fixes: e1eaea46bb40 ("tty: n_gsm line discipline")
Signed-off-by: Daniel Starke <daniel.starke@siemens.com>
Link: https://lore.kernel.org/r/20220701122332.2039-1-daniel.starke@siemens.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2022-07-01 12:23:31 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_send_packet - sends a single packet
|
|
|
|
|
* @gsm: GSM Mux
|
|
|
|
|
* @msg: packet to send
|
|
|
|
|
*
|
|
|
|
|
* The given packet is encoded and sent out. No memory is freed.
|
|
|
|
|
* The caller must hold the gsm tx lock.
|
|
|
|
|
*/
|
|
|
|
|
static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg)
|
|
|
|
|
{
|
|
|
|
|
int len, ret;
|
|
|
|
|
|
|
|
|
|
|
2022-08-31 07:37:55 +00:00
|
|
|
|
if (gsm->encoding == GSM_BASIC_OPT) {
|
		gsm->txframe[0] = GSM0_SOF;
		memcpy(gsm->txframe + 1, msg->data, msg->len);
		gsm->txframe[msg->len + 1] = GSM0_SOF;
		len = msg->len + 2;
	} else {
		gsm->txframe[0] = GSM1_SOF;
		len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len);
		gsm->txframe[len + 1] = GSM1_SOF;
		len += 2;
	}

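	/* Dump and trace the encoded frame for debugging. */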
	if (debug & DBG_DATA)
		gsm_hex_dump_bytes(__func__, gsm->txframe, len);
	gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data,
			 msg->len);

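	/*
	 * Hand the frame to the line discipline. tx_bytes is only released
	 * after the ldisc has accepted the frame; on failure the caller
	 * decides whether to retry or drop the message.
	 */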
	ret = gsmld_output(gsm, gsm->txframe, len);
	if (ret <= 0)
		return ret;
	/* FIXME: Can eliminate one SOF in many more cases */
	gsm->tx_bytes -= msg->len;

	return 0;
}

/**
 * gsm_is_flow_ctrl_msg - checks if the given message is a flow control message
 * @msg: message to check
 *
 * Returns true if the given message is a flow control command of the
 * control channel. False is returned in any other case.
 */
static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg)
{
	unsigned int cmd;

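	/* Flow control commands are only carried on the control channel. */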
	if (msg->addr > 0)
		return false;

	switch (msg->ctrl & ~PF) {
	case UI:
	case UIH:
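		/* Read the EA coded command and test for FCon/FCoff. */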
		cmd = 0;
		if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1)
			break;
		switch (cmd & ~PF) {
		case CMD_FCOFF:
		case CMD_FCON:
			return true;
		}
		break;
	}

	return false;
}

/**
 * gsm_data_kick - poke the queue
 * @gsm: GSM Mux
 *
 * The tty device has called us to indicate that room has appeared in
 * the transmit queue. Ram more data into the pipe if we have any.
 * If we have been flow-stopped by a CMD_FCOFF, then we can only
 * send messages on DLCI0 until CMD_FCON. The caller must hold
 * the gsm tx lock.
 */
static int gsm_data_kick(struct gsm_mux *gsm)
{
	struct gsm_msg *msg, *nmsg;
	struct gsm_dlci *dlci;
	int ret;

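	/* Clear any pending write wakeup before pushing more data to the ldisc. */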
	clear_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);

	/* Serialize control messages and control channel messages first */
	list_for_each_entry_safe(msg, nmsg, &gsm->tx_ctrl_list, list) {
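		/*
		 * While flow-stopped by FCoff, only flow control commands
		 * themselves may still go out on the control channel.
		 */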
		if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg))
			continue;
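		/*
		 * Push the packet to the ldisc: keep it queued if there is
		 * no room, drop it if the ldisc is gone, and free it once it
		 * has been written.
		 */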
		ret = gsm_send_packet(gsm, msg);
		switch (ret) {
		case -ENOSPC:
			return -ENOSPC;
		case -ENODEV:
			/* ldisc not open */
			gsm->tx_bytes -= msg->len;
			list_del(&msg->list);
			kfree(msg);
			continue;
		default:
			if (ret >= 0) {
				list_del(&msg->list);
				kfree(msg);
			}
			break;
		}
	}

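	/* No user payload may go out while we are flow-stopped by FCoff. */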
if (gsm->constipated)
|
|
|
|
|
return -EAGAIN;
|
2020-05-12 11:53:23 +00:00
|
|
|
|
|
2022-07-01 12:23:31 +00:00
|
|
|
|
/* Serialize other channels */
|
|
|
|
|
if (list_empty(&gsm->tx_data_list))
|
|
|
|
|
return 0;
|
|
|
|
|
list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
|
|
|
|
|
dlci = gsm->dlci[msg->addr];
|
|
|
|
|
/* Send only messages for DLCIs with valid state */
|
|
|
|
|
if (dlci->state != DLCI_OPEN) {
|
|
|
|
|
gsm->tx_bytes -= msg->len;
|
|
|
|
|
list_del(&msg->list);
|
|
|
|
|
kfree(msg);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
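/* Pass the packet down to the ldisc; on -ENOSPC it stays queued for a later retry */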
ret = gsm_send_packet(gsm, msg);
|
|
|
|
|
switch (ret) {
|
|
|
|
|
case -ENOSPC:
|
|
|
|
|
return -ENOSPC;
|
|
|
|
|
case -ENODEV:
|
|
|
|
|
/* ldisc not open */
|
|
|
|
|
gsm->tx_bytes -= msg->len;
|
|
|
|
|
list_del(&msg->list);
|
|
|
|
|
kfree(msg);
|
|
|
|
|
continue;
|
|
|
|
|
default:
|
|
|
|
|
if (ret >= 0) {
|
|
|
|
|
list_del(&msg->list);
|
|
|
|
|
kfree(msg);
|
|
|
|
|
}
|
|
|
|
|
break;
|
2020-05-12 11:53:23 +00:00
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
2022-07-01 12:23:31 +00:00
|
|
|
|
|
|
|
|
|
return 1;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* __gsm_data_queue - queue a UI or UIH frame
|
|
|
|
|
* @dlci: DLCI sending the data
|
|
|
|
|
* @msg: message queued
|
|
|
|
|
*
|
|
|
|
|
* Add data to the transmit queue and try and get stuff moving
|
|
|
|
|
* out of the mux tty if not already doing so. The caller must hold
|
|
|
|
|
* the gsm tx lock.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm = dlci->gsm;
|
|
|
|
|
u8 *dp = msg->data;
|
|
|
|
|
u8 *fcs = dp + msg->len;
|
|
|
|
|
|
|
|
|
|
/* Fill in the header */
|
2022-08-31 07:37:55 +00:00
|
|
|
|
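/* Length octets are only present in the basic option encoding */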
if (gsm->encoding == GSM_BASIC_OPT) {
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (msg->len < 128)
|
|
|
|
|
*--dp = (msg->len << 1) | EA;
|
|
|
|
|
else {
|
2010-12-13 15:27:27 +00:00
|
|
|
|
*--dp = (msg->len >> 7); /* bits 7 - 15 */
|
|
|
|
|
*--dp = (msg->len & 127) << 1; /* bits 0 - 6 */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
*--dp = msg->ctrl;
|
|
|
|
|
if (gsm->initiator)
|
2022-04-20 10:13:46 +00:00
|
|
|
|
*--dp = (msg->addr << 2) | CR | EA;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
else
|
|
|
|
|
*--dp = (msg->addr << 2) | EA;
|
|
|
|
|
*fcs = gsm_fcs_add_block(INIT_FCS, dp, msg->data - dp);
|
|
|
|
|
/* Ugly protocol layering violation */
|
|
|
|
|
if (msg->ctrl == UI || msg->ctrl == (UI|PF))
|
|
|
|
|
*fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len);
|
|
|
|
|
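/* The FCS on the wire is the ones' complement of the accumulated checksum */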
*fcs = 0xFF - *fcs;
|
|
|
|
|
|
|
|
|
|
gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl,
|
|
|
|
|
msg->data, msg->len);
|
|
|
|
|
|
|
|
|
|
/* Move the header back and adjust the length, also allow for the FCS
|
|
|
|
|
now tacked on the end */
|
|
|
|
|
msg->len += (msg->data - dp) + 1;
|
|
|
|
|
msg->data = dp;
|
|
|
|
|
|
|
|
|
|
/* Add to the actual output queue */
|
2022-07-01 12:23:31 +00:00
|
|
|
|
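/* UI/UIH data for a user DLCI goes to the data queue; everything else
 * (including DLCI 0) goes to the higher priority control queue */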
switch (msg->ctrl & ~PF) {
|
|
|
|
|
case UI:
|
|
|
|
|
case UIH:
|
|
|
|
|
if (msg->addr > 0) {
|
|
|
|
|
list_add_tail(&msg->list, &gsm->tx_data_list);
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
fallthrough;
|
|
|
|
|
default:
|
|
|
|
|
list_add_tail(&msg->list, &gsm->tx_ctrl_list);
|
|
|
|
|
break;
|
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->tx_bytes += msg->len;
|
2022-07-01 12:23:31 +00:00
|
|
|
|
|
|
|
|
|
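/* Schedule the write task and rearm the kick timer as a fallback */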
gsmld_write_trigger(gsm);
|
2022-10-08 11:02:21 +00:00
|
|
|
|
mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_data_queue - queue a UI or UIH frame
|
|
|
|
|
* @dlci: DLCI sending the data
|
|
|
|
|
* @msg: message queued
|
|
|
|
|
*
|
|
|
|
|
* Add data to the transmit queue and try and get stuff moving
|
|
|
|
|
* out of the mux tty if not already doing so. Take the
|
|
|
|
|
* gsm tx lock and dlci lock.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
|
|
|
|
|
{
|
2022-10-08 11:02:20 +00:00
|
|
|
|
unsigned long flags;
|
|
|
|
|
spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
__gsm_data_queue(dlci, msg);
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_data_output - try and push data out of a DLCI
|
|
|
|
|
* @gsm: mux
|
|
|
|
|
* @dlci: the DLCI to pull data from
|
|
|
|
|
*
|
|
|
|
|
* Pull data from a DLCI and send it into the transmit queue if there
|
|
|
|
|
* is data. Keep to the MRU of the mux. This path handles the usual tty
|
|
|
|
|
* interface which is a byte stream with optional modem data.
|
|
|
|
|
*
|
2022-10-08 11:02:20 +00:00
|
|
|
|
* Caller must hold the tx_lock of the mux.
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_msg *msg;
|
|
|
|
|
u8 *dp;
|
2022-07-01 06:16:46 +00:00
|
|
|
|
int h, len, size;
|
|
|
|
|
|
|
|
|
|
/* for modem bits without break data */
|
|
|
|
|
h = ((dlci->adaption == 1) ? 0 : 1);
|
|
|
|
|
|
|
|
|
|
len = kfifo_len(&dlci->fifo);
|
|
|
|
|
if (len == 0)
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
|
|
/* MTU/MRU count only the data bits but watch adaption mode */
|
2022-11-03 09:17:42 +00:00
|
|
|
|
if ((len + h) > dlci->mtu)
|
|
|
|
|
len = dlci->mtu - h;
|
2022-07-01 06:16:46 +00:00
|
|
|
|
|
|
|
|
|
size = len + h;
|
|
|
|
|
|
2022-11-03 09:17:42 +00:00
|
|
|
|
msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype);
|
2022-07-01 06:16:46 +00:00
|
|
|
|
if (!msg)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
dp = msg->data;
|
|
|
|
|
switch (dlci->adaption) {
|
|
|
|
|
case 1: /* Unstructured */
|
|
|
|
|
break;
|
|
|
|
|
case 2: /* Unstructured with modem bits.
|
|
|
|
|
* Always one byte as we never send inline break data
|
|
|
|
|
*/
|
|
|
|
|
*dp++ = (gsm_encode_modem(dlci) << 1) | EA;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
pr_err("%s: unsupported adaption %d\n", __func__,
|
|
|
|
|
dlci->adaption);
|
|
|
|
|
break;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
2022-07-01 06:16:46 +00:00
|
|
|
|
|
|
|
|
|
WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len,
|
|
|
|
|
&dlci->lock));
|
|
|
|
|
|
|
|
|
|
/* Notify upper layer about available send space. */
|
|
|
|
|
tty_port_tty_wakeup(&dlci->port);
|
|
|
|
|
|
|
|
|
|
__gsm_data_queue(dlci, msg);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/* Bytes of data we used up */
|
2022-07-01 06:16:46 +00:00
|
|
|
|
return size;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_data_output_framed - try and push data out of a DLCI
|
|
|
|
|
* @gsm: mux
|
|
|
|
|
* @dlci: the DLCI to pull data from
|
|
|
|
|
*
|
|
|
|
|
* Pull data from a DLCI and send it into the transmit queue if there
|
|
|
|
|
* is data. Keep to the MRU of the mux. This path handles framed data
|
|
|
|
|
* queued as skbuffs to the DLCI.
|
|
|
|
|
*
|
2022-10-08 11:02:20 +00:00
|
|
|
|
* Caller must hold the tx_lock of the mux.
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
|
|
|
|
|
struct gsm_dlci *dlci)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_msg *msg;
|
|
|
|
|
u8 *dp;
|
|
|
|
|
int len, size;
|
|
|
|
|
int last = 0, first = 0;
|
|
|
|
|
int overhead = 0;
|
|
|
|
|
|
|
|
|
|
/* One byte per frame is used for B/F flags */
|
|
|
|
|
if (dlci->adaption == 4)
|
|
|
|
|
overhead = 1;
|
|
|
|
|
|
2022-10-08 11:02:20 +00:00
|
|
|
|
/* dlci->skb is locked by tx_lock */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (dlci->skb == NULL) {
|
2012-08-13 12:45:30 +00:00
|
|
|
|
dlci->skb = skb_dequeue_tail(&dlci->skb_list);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (dlci->skb == NULL)
|
|
|
|
|
return 0;
|
|
|
|
|
first = 1;
|
|
|
|
|
}
|
|
|
|
|
len = dlci->skb->len + overhead;
|
|
|
|
|
|
|
|
|
|
/* MTU/MRU count only the data bits */
|
2022-11-03 09:17:42 +00:00
|
|
|
|
if (len > dlci->mtu) {
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (dlci->adaption == 3) {
|
|
|
|
|
/* Over long frame, bin it */
|
2012-08-13 12:45:15 +00:00
|
|
|
|
dev_kfree_skb_any(dlci->skb);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
dlci->skb = NULL;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
2022-11-03 09:17:42 +00:00
|
|
|
|
len = dlci->mtu;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
} else
|
|
|
|
|
last = 1;
|
|
|
|
|
|
|
|
|
|
size = len + overhead;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype);
|
2012-08-13 12:45:30 +00:00
|
|
|
|
if (msg == NULL) {
|
|
|
|
|
skb_queue_tail(&dlci->skb_list, dlci->skb);
|
|
|
|
|
dlci->skb = NULL;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return -ENOMEM;
|
2012-08-13 12:45:30 +00:00
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
dp = msg->data;
|
|
|
|
|
|
|
|
|
|
if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */
|
|
|
|
|
/* Flag byte to carry the start/end info */
|
|
|
|
|
*dp++ = last << 7 | first << 6 | 1; /* EA */
|
|
|
|
|
len--;
|
|
|
|
|
}
|
2011-06-14 20:23:29 +00:00
|
|
|
|
memcpy(dp, dlci->skb->data, len);
|
|
|
|
|
skb_pull(dlci->skb, len);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
__gsm_data_queue(dlci, msg);
|
2011-06-16 21:20:12 +00:00
|
|
|
|
if (last) {
|
2012-08-13 12:45:15 +00:00
|
|
|
|
dev_kfree_skb_any(dlci->skb);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
dlci->skb = NULL;
|
2011-06-16 21:20:12 +00:00
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return size;
|
|
|
|
|
}
|
|
|
|
|
|
2022-04-22 07:10:24 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_modem_output - try and push modem status out of a DLCI
|
|
|
|
|
* @gsm: mux
|
|
|
|
|
* @dlci: the DLCI to pull modem status from
|
|
|
|
|
* @brk: break signal
|
|
|
|
|
*
|
|
|
|
|
* Push an empty frame in to the transmit queue to update the modem status
|
|
|
|
|
* bits and to transmit an optional break.
|
|
|
|
|
*
|
2022-10-08 11:02:20 +00:00
|
|
|
|
* Caller must hold the tx_lock of the mux.
|
2022-04-22 07:10:24 +00:00
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
|
|
|
|
|
u8 brk)
|
|
|
|
|
{
|
|
|
|
|
u8 *dp = NULL;
|
|
|
|
|
struct gsm_msg *msg;
|
2022-04-25 10:47:26 +00:00
|
|
|
|
int size = 0;
|
2022-04-22 07:10:24 +00:00
|
|
|
|
|
|
|
|
|
/* for modem bits without break data */
|
2022-04-25 10:47:26 +00:00
|
|
|
|
switch (dlci->adaption) {
|
|
|
|
|
case 1: /* Unstructured */
|
|
|
|
|
break;
|
|
|
|
|
case 2: /* Unstructured with modem bits. */
|
|
|
|
|
size++;
|
2022-04-22 07:10:24 +00:00
|
|
|
|
if (brk > 0)
|
|
|
|
|
size++;
|
2022-04-25 10:47:26 +00:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2022-04-22 07:10:24 +00:00
|
|
|
|
pr_err("%s: unsupported adaption %d\n", __func__,
|
|
|
|
|
dlci->adaption);
|
2022-04-25 10:47:26 +00:00
|
|
|
|
return -EINVAL;
|
2022-04-22 07:10:24 +00:00
|
|
|
|
}
|
|
|
|
|
|
2022-11-03 09:17:42 +00:00
|
|
|
|
msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype);
|
2022-04-22 07:10:24 +00:00
|
|
|
|
if (!msg) {
|
|
|
|
|
pr_err("%s: gsm_data_alloc error\n", __func__);
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
}
|
|
|
|
|
dp = msg->data;
|
|
|
|
|
switch (dlci->adaption) {
|
|
|
|
|
case 1: /* Unstructured */
|
|
|
|
|
break;
|
|
|
|
|
case 2: /* Unstructured with modem bits. */
|
|
|
|
|
if (brk == 0) {
|
|
|
|
|
*dp++ = (gsm_encode_modem(dlci) << 1) | EA;
|
|
|
|
|
} else {
|
|
|
|
|
*dp++ = gsm_encode_modem(dlci) << 1;
|
|
|
|
|
*dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
/* Handled above */
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
__gsm_data_queue(dlci, msg);
|
|
|
|
|
return size;
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_data_sweep - look for data to send
|
|
|
|
|
* @gsm: the GSM mux
|
|
|
|
|
*
|
|
|
|
|
* Sweep the GSM mux channels in priority order looking for ones with
|
|
|
|
|
* data to send. We could do with optimising this scan a bit. We aim
|
|
|
|
|
* to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit
|
|
|
|
|
* TX_THRESH_LO we get called again
|
|
|
|
|
*
|
|
|
|
|
* FIXME: We should round robin between groups and in theory you can
|
|
|
|
|
* renegotiate DLCI priorities with optional stuff. Needs optimising.
|
|
|
|
|
*/
|
|
|
|
|
|
2022-07-01 06:16:47 +00:00
|
|
|
|
static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
/* Priority ordering: We should do priority with RR of the groups */
|
2022-07-01 12:23:31 +00:00
|
|
|
|
int i, len, ret = 0;
|
|
|
|
|
bool sent;
|
|
|
|
|
struct gsm_dlci *dlci;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2022-07-01 12:23:31 +00:00
|
|
|
|
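/* Fill the transmission queue up to the high watermark or until no channel had more data to send */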
while (gsm->tx_bytes < TX_THRESH_HI) {
|
|
|
|
|
for (sent = false, i = 1; i < NUM_DLCI; i++) {
|
|
|
|
|
dlci = gsm->dlci[i];
|
|
|
|
|
/* skip unused or blocked channel */
|
|
|
|
|
if (!dlci || dlci->constipated)
|
|
|
|
|
continue;
|
|
|
|
|
/* skip channels with invalid state */
|
|
|
|
|
if (dlci->state != DLCI_OPEN)
|
|
|
|
|
continue;
|
|
|
|
|
/* count the sent data per adaption */
|
|
|
|
|
if (dlci->adaption < 3 && !dlci->net)
|
|
|
|
|
len = gsm_dlci_data_output(gsm, dlci);
|
|
|
|
|
else
|
|
|
|
|
len = gsm_dlci_data_output_framed(gsm, dlci);
|
|
|
|
|
/* on error exit */
|
|
|
|
|
if (len < 0)
|
|
|
|
|
return ret;
|
|
|
|
|
if (len > 0) {
|
|
|
|
|
ret++;
|
|
|
|
|
sent = true;
|
|
|
|
|
/* The lower DLCs can starve the higher DLCs! */
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
/* try next */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
2022-07-01 12:23:31 +00:00
|
|
|
|
if (!sent)
|
2010-08-11 01:03:12 +00:00
|
|
|
|
break;
|
2022-11-05 15:26:56 +00:00
|
|
|
|
}
|
2022-07-01 06:16:47 +00:00
|
|
|
|
|
|
|
|
|
return ret;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_data_kick - transmit if possible
|
|
|
|
|
* @dlci: DLCI to kick
|
|
|
|
|
*
|
|
|
|
|
* Transmit data from this DLCI if the queue is empty. We can't rely on
|
|
|
|
|
* a tty wakeup except when we filled the pipe so we need to fire off
|
|
|
|
|
* new data ourselves in other cases.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
2022-10-08 11:02:20 +00:00
|
|
|
|
unsigned long flags;
|
2012-08-13 12:43:36 +00:00
|
|
|
|
int sweep;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2013-07-08 19:28:00 +00:00
|
|
|
|
if (dlci->constipated)
|
2012-08-13 12:43:58 +00:00
|
|
|
|
return;
|
|
|
|
|
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/* If we have nothing running then we need to fire up */
|
2012-08-13 12:43:36 +00:00
|
|
|
|
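/* Sweep all channels afterwards if the queue has drained below the low watermark */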
sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
|
2011-06-16 21:20:12 +00:00
|
|
|
|
if (dlci->gsm->tx_bytes == 0) {
|
|
|
|
|
if (dlci->net)
|
|
|
|
|
gsm_dlci_data_output_framed(dlci->gsm, dlci);
|
|
|
|
|
else
|
|
|
|
|
gsm_dlci_data_output(dlci->gsm, dlci);
|
2012-08-13 12:43:36 +00:00
|
|
|
|
}
|
|
|
|
|
if (sweep)
|
2013-07-08 19:28:00 +00:00
|
|
|
|
gsm_dlci_data_sweep(dlci->gsm);
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Control message processing
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
2022-08-31 07:37:58 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_control_command - send a command frame to a control
|
|
|
|
|
* @gsm: gsm channel
|
|
|
|
|
* @cmd: the command to use
|
|
|
|
|
* @data: data to follow encoded info
|
|
|
|
|
* @dlen: length of data
|
|
|
|
|
*
|
|
|
|
|
* Encode up and queue a UI/UIH frame containing our command.
|
|
|
|
|
*/
|
|
|
|
|
static int gsm_control_command(struct gsm_mux *gsm, int cmd, const u8 *data,
|
|
|
|
|
int dlen)
|
|
|
|
|
{
|
2022-11-03 09:17:42 +00:00
|
|
|
|
struct gsm_msg *msg;
|
2023-08-17 09:32:29 +00:00
|
|
|
|
struct gsm_dlci *dlci = gsm->dlci[0];
|
2022-08-31 07:37:58 +00:00
|
|
|
|
|
2023-08-17 09:32:29 +00:00
|
|
|
|
msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype);
|
2022-08-31 07:37:58 +00:00
|
|
|
|
if (msg == NULL)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
|
|
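/* Control message layout on DLCI 0: type octet, length octet, then the value */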
msg->data[0] = (cmd << 1) | CR | EA; /* Set C/R */
|
|
|
|
|
msg->data[1] = (dlen << 1) | EA;
|
|
|
|
|
memcpy(msg->data + 2, data, dlen);
|
2023-08-17 09:32:29 +00:00
|
|
|
|
gsm_data_queue(dlci, msg);
|
2022-08-31 07:37:58 +00:00
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_control_reply - send a response frame to a control
|
|
|
|
|
* @gsm: gsm channel
|
|
|
|
|
* @cmd: the command to use
|
|
|
|
|
* @data: data to follow encoded info
|
|
|
|
|
* @dlen: length of data
|
|
|
|
|
*
|
|
|
|
|
* Encode up and queue a UI/UIH frame containing our response.
|
|
|
|
|
*/
|
|
|
|
|
|
2019-01-14 01:25:27 +00:00
|
|
|
|
static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
|
2010-03-26 11:32:54 +00:00
|
|
|
|
int dlen)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_msg *msg;
|
2023-08-17 09:32:29 +00:00
|
|
|
|
struct gsm_dlci *dlci = gsm->dlci[0];
|
2022-11-03 09:17:42 +00:00
|
|
|
|
|
2023-08-17 09:32:29 +00:00
|
|
|
|
msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype);
|
2010-12-13 15:28:03 +00:00
|
|
|
|
if (msg == NULL)
|
|
|
|
|
return;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */
|
|
|
|
|
msg->data[1] = (dlen << 1) | EA;
|
|
|
|
|
memcpy(msg->data + 2, data, dlen);
|
2023-08-17 09:32:29 +00:00
|
|
|
|
gsm_data_queue(dlci, msg);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_process_modem - process received modem status
|
|
|
|
|
* @tty: virtual tty bound to the DLCI
|
|
|
|
|
* @dlci: DLCI to affect
|
|
|
|
|
* @modem: modem bits (full EA)
|
2022-02-18 07:31:22 +00:00
|
|
|
|
* @slen: number of signal octets
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*
|
|
|
|
|
* Used when a modem control message or line state inline in adaption
|
|
|
|
|
* layer 2 is processed. Sort out the local modem state and throttles
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
|
2022-02-18 07:31:22 +00:00
|
|
|
|
u32 modem, int slen)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
int mlines = 0;
|
2011-06-14 20:23:28 +00:00
|
|
|
|
u8 brk = 0;
|
2012-08-13 12:43:58 +00:00
|
|
|
|
int fc;
|
2011-06-14 20:23:28 +00:00
|
|
|
|
|
2022-02-18 07:31:22 +00:00
|
|
|
|
/* The modem status command can either contain one octet (V.24 signals)
|
|
|
|
|
* or two octets (V.24 signals + break signals). This is specified in
|
|
|
|
|
* section 5.4.6.3.7 of the 07.10 mux spec.
|
|
|
|
|
*/
|
2011-06-14 20:23:28 +00:00
|
|
|
|
|
2022-02-18 07:31:22 +00:00
|
|
|
|
if (slen == 1)
|
2011-06-14 20:23:28 +00:00
|
|
|
|
modem = modem & 0x7f;
|
|
|
|
|
else {
|
|
|
|
|
brk = modem & 0x7f;
|
|
|
|
|
modem = (modem >> 7) & 0x7f;
|
2012-08-13 12:43:58 +00:00
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
|
|
|
|
/* Flow control/ready to communicate */
|
2012-08-13 12:43:58 +00:00
|
|
|
|
fc = (modem & MDM_FC) || !(modem & MDM_RTR);
|
|
|
|
|
if (fc && !dlci->constipated) {
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/* Need to throttle our output on this device */
|
2020-02-19 08:49:48 +00:00
|
|
|
|
dlci->constipated = true;
|
2012-08-13 12:43:58 +00:00
|
|
|
|
} else if (!fc && dlci->constipated) {
|
2020-02-19 08:49:48 +00:00
|
|
|
|
dlci->constipated = false;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm_dlci_data_kick(dlci);
|
|
|
|
|
}
|
2012-08-13 12:43:58 +00:00
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/* Map modem bits */
|
2012-08-13 12:43:58 +00:00
|
|
|
|
if (modem & MDM_RTC)
|
|
|
|
|
mlines |= TIOCM_DSR | TIOCM_DTR;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (modem & MDM_RTR)
|
|
|
|
|
mlines |= TIOCM_RTS | TIOCM_CTS;
|
|
|
|
|
if (modem & MDM_IC)
|
|
|
|
|
mlines |= TIOCM_RI;
|
|
|
|
|
if (modem & MDM_DV)
|
|
|
|
|
mlines |= TIOCM_CD;
|
|
|
|
|
|
|
|
|
|
/* Carrier drop -> hangup */
|
|
|
|
|
if (tty) {
|
|
|
|
|
if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
|
2016-01-11 04:36:15 +00:00
|
|
|
|
if (!C_CLOCAL(tty))
|
2010-03-26 11:32:54 +00:00
|
|
|
|
tty_hangup(tty);
|
|
|
|
|
}
|
2013-01-03 14:53:03 +00:00
|
|
|
|
if (brk & 0x01)
|
|
|
|
|
tty_insert_flip_char(&dlci->port, 0, TTY_BREAK);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
dlci->modem_rx = mlines;
|
2023-02-06 11:46:06 +00:00
|
|
|
|
wake_up_interruptible(&dlci->gsm->event);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
2022-11-03 09:17:43 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_process_negotiation - process received parameters
|
|
|
|
|
* @gsm: GSM channel
|
|
|
|
|
* @addr: DLCI address
|
|
|
|
|
* @cr: command/response
|
|
|
|
|
* @params: encoded parameters from the parameter negotiation message
|
|
|
|
|
*
|
|
|
|
|
* Used when a parameter negotiation command or the response to our own
|
|
|
|
|
* request was received.
|
|
|
|
|
*/
|
|
|
|
|
static int gsm_process_negotiation(struct gsm_mux *gsm, unsigned int addr,
|
|
|
|
|
unsigned int cr,
|
|
|
|
|
const struct gsm_dlci_param_bits *params)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_dlci *dlci = gsm->dlci[addr];
|
|
|
|
|
unsigned int ftype, i, adaption, prio, n1, k;
|
|
|
|
|
|
|
|
|
|
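/* Decode the negotiated parameters: frame type, convergence layer, priority, frame size N1 and window size k */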
i = FIELD_GET(PN_I_CL_FIELD_FTYPE, params->i_cl_bits);
|
|
|
|
|
adaption = FIELD_GET(PN_I_CL_FIELD_ADAPTION, params->i_cl_bits) + 1;
|
|
|
|
|
prio = FIELD_GET(PN_P_FIELD_PRIO, params->p_bits);
|
|
|
|
|
n1 = FIELD_GET(PN_N_FIELD_N1, get_unaligned_le16(¶ms->n_bits));
|
|
|
|
|
k = FIELD_GET(PN_K_FIELD_K, params->k_bits);
|
|
|
|
|
|
|
|
|
|
if (n1 < MIN_MTU) {
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s N1 out of range in PN\n", __func__);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch (i) {
|
|
|
|
|
case 0x00:
|
|
|
|
|
ftype = UIH;
|
|
|
|
|
break;
|
|
|
|
|
case 0x01:
|
|
|
|
|
ftype = UI;
|
|
|
|
|
break;
|
|
|
|
|
case 0x02: /* I frames are not supported */
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s unsupported I frame request in PN\n",
|
|
|
|
|
__func__);
|
2023-08-17 09:32:28 +00:00
|
|
|
|
gsm->unsupported++;
|
2022-11-03 09:17:43 +00:00
|
|
|
|
return -EINVAL;
|
|
|
|
|
default:
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s i out of range in PN\n", __func__);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
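/* Response to our own request: accept only parameters equal to or stricter than what we proposed */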
if (!cr && gsm->initiator) {
|
|
|
|
|
if (adaption != dlci->adaption) {
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s invalid adaption %d in PN\n",
|
|
|
|
|
__func__, adaption);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
if (prio != dlci->prio) {
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s invalid priority %d in PN\n",
|
|
|
|
|
__func__, prio);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
if (n1 > gsm->mru || n1 > dlci->mtu) {
|
|
|
|
|
/* We requested a frame size but the other party wants
|
|
|
|
|
* to send larger frames. The standard allows only a
|
|
|
|
|
* smaller response value than requested (5.4.6.3.1).
|
|
|
|
|
*/
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s invalid N1 %d in PN\n", __func__,
|
|
|
|
|
n1);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
dlci->mtu = n1;
|
|
|
|
|
if (ftype != dlci->ftype) {
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s invalid i %d in PN\n", __func__, i);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
if (ftype != UI && ftype != UIH && k > dlci->k) {
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s invalid k %d in PN\n", __func__, k);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
dlci->k = k;
|
|
|
|
|
} else if (cr && !gsm->initiator) {
|
|
|
|
|
/* Only convergence layer type 1 and 2 are supported. */
|
|
|
|
|
if (adaption != 1 && adaption != 2) {
|
|
|
|
|
if (debug & DBG_ERRORS)
|
|
|
|
|
pr_info("%s invalid adaption %d in PN\n",
|
|
|
|
|
__func__, adaption);
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
dlci->adaption = adaption;
|
|
|
|
|
if (n1 > gsm->mru) {
|
|
|
|
|
/* Propose a smaller value */
|
|
|
|
|
dlci->mtu = gsm->mru;
|
|
|
|
|
} else if (n1 > MAX_MTU) {
|
|
|
|
|
/* Propose a smaller value */
|
|
|
|
|
dlci->mtu = MAX_MTU;
|
|
|
|
|
} else {
|
|
|
|
|
dlci->mtu = n1;
|
|
|
|
|
}
|
|
|
|
|
dlci->prio = prio;
|
|
|
|
|
dlci->ftype = ftype;
|
|
|
|
|
dlci->k = k;
|
|
|
|
|
} else {
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
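/*
 * Illustrative note: on the initiator side a PN response may only shrink
 * what we proposed, e.g. a reply carrying N1 = 64 to our request of 127
 * simply lowers dlci->mtu to 64, while any larger N1 is rejected with
 * -EINVAL as required by clause 5.4.6.3.1 referenced above.
 */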
/**
 * gsm_control_modem - modem status received
 * @gsm: GSM channel
 * @data: data following command
 * @clen: command length
 *
 * We have received a modem status control message. This is used by
 * the GSM mux protocol to pass virtual modem line status and optionally
 * to indicate break signals. Unpack it, convert to Linux representation
 * and if need be stuff a break message down the tty.
 */
static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen)
{
	unsigned int addr = 0;
	unsigned int modem = 0;
	struct gsm_dlci *dlci;
	int len = clen;
	int cl = clen;
	const u8 *dp = data;
	struct tty_struct *tty;

	len = gsm_read_ea_val(&addr, data, cl);
	if (len < 1)
		return;

	addr >>= 1;
	/* Closed port, or invalid ? */
	if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
		return;
	dlci = gsm->dlci[addr];

	/* Must be at least one byte following the EA */
	if ((cl - len) < 1)
		return;

	dp += len;
	cl -= len;

	/* get the modem status */
	len = gsm_read_ea_val(&modem, dp, cl);
	if (len < 1)
		return;

	tty = tty_port_tty_get(&dlci->port);
	gsm_process_modem(tty, dlci, modem, cl);
	if (tty) {
		tty_wakeup(tty);
		tty_kref_put(tty);
	}
	gsm_control_reply(gsm, CMD_MSC, data, clen);
}
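/*
 * Value layout as parsed above (illustrative): an EA-coded DLCI address
 * followed by an EA-coded modem status value. The remaining length cl is
 * passed to gsm_process_modem() so it can distinguish a one octet status
 * from the two octet status plus break variant.
 */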
/**
 * gsm_control_negotiation - parameter negotiation received
 * @gsm: GSM channel
 * @cr: command/response flag
 * @data: data following command
 * @dlen: data length
 *
 * We have received a parameter negotiation message. This is used by
 * the GSM mux protocol to configure protocol parameters for a new DLCI.
 */
static void gsm_control_negotiation(struct gsm_mux *gsm, unsigned int cr,
				    const u8 *data, unsigned int dlen)
{
	unsigned int addr;
	struct gsm_dlci_param_bits pn_reply;
	struct gsm_dlci *dlci;
	struct gsm_dlci_param_bits *params;

	if (dlen < sizeof(struct gsm_dlci_param_bits)) {
		gsm->open_error++;
		return;
	}

	/* Invalid DLCI? */
	params = (struct gsm_dlci_param_bits *)data;
	addr = FIELD_GET(PN_D_FIELD_DLCI, params->d_bits);
	if (addr == 0 || addr >= NUM_DLCI || !gsm->dlci[addr]) {
		gsm->open_error++;
		return;
	}
	dlci = gsm->dlci[addr];

	/* Too late for parameter negotiation? */
	if ((!cr && dlci->state == DLCI_OPENING) || dlci->state == DLCI_OPEN) {
		gsm->open_error++;
		return;
	}

	/* Process the received parameters */
	if (gsm_process_negotiation(gsm, addr, cr, params) != 0) {
		/* Negotiation failed. Close the link. */
		if (debug & DBG_ERRORS)
			pr_info("%s PN failed\n", __func__);
		gsm->open_error++;
		gsm_dlci_close(dlci);
		return;
	}

	if (cr) {
		/* Reply command with accepted parameters. */
		if (gsm_encode_params(dlci, &pn_reply) == 0)
			gsm_control_reply(gsm, CMD_PN, (const u8 *)&pn_reply,
					  sizeof(pn_reply));
		else if (debug & DBG_ERRORS)
			pr_info("%s PN invalid\n", __func__);
	} else if (dlci->state == DLCI_CONFIGURE) {
		/* Proceed with link setup by sending SABM before UA */
		dlci->state = DLCI_OPENING;
		gsm_command(gsm, dlci->addr, SABM|PF);
		mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
	} else {
		if (debug & DBG_ERRORS)
			pr_info("%s PN in invalid state\n", __func__);
		gsm->open_error++;
	}
}
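/*
 * Role summary (illustrative): with cr set this is a PN command from the
 * peer and we answer via gsm_control_reply(CMD_PN, ...) with the values we
 * accepted; with cr clear it is the response to our own PN command and a
 * DLCI in DLCI_CONFIGURE moves to DLCI_OPENING by sending SABM.
 */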
/**
 * gsm_control_rls - remote line status
 * @gsm: GSM channel
 * @data: data bytes
 * @clen: data length
 *
 * The modem sends us a two byte message on the control channel whenever
 * it wishes to send us an error state from the virtual link. Stuff
 * this into the uplink tty if present
 */
static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen)
{
	struct tty_port *port;
	unsigned int addr = 0;
	u8 bits;
	int len = clen;
	const u8 *dp = data;

	while (gsm_read_ea(&addr, *dp++) == 0) {
		len--;
		if (len == 0)
			return;
	}
	/* Must be at least one byte following ea */
	len--;
	if (len <= 0)
		return;
	addr >>= 1;
	/* Closed port, or invalid ? */
	if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL)
		return;
	/* No error ? */
	bits = *dp;
	if ((bits & 1) == 0)
		return;

	port = &gsm->dlci[addr]->port;

	if (bits & 2)
		tty_insert_flip_char(port, 0, TTY_OVERRUN);
	if (bits & 4)
		tty_insert_flip_char(port, 0, TTY_PARITY);
	if (bits & 8)
		tty_insert_flip_char(port, 0, TTY_FRAME);

	tty_flip_buffer_push(port);

	gsm_control_reply(gsm, CMD_RLS, data, clen);
}
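/*
 * Bit layout used above (taken from the checks themselves): bit 0 flags
 * that an error is being reported, bit 1 maps to TTY_OVERRUN, bit 2 to
 * TTY_PARITY and bit 3 to TTY_FRAME.
 */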
static void gsm_dlci_begin_close(struct gsm_dlci *dlci);

/**
 * gsm_control_message - DLCI 0 control processing
 * @gsm: our GSM mux
 * @command: the command EA
 * @data: data beyond the command/length EAs
 * @clen: length
 *
 * Input processor for control messages from the other end of the link.
 * Processes the incoming request and queues a response frame or an
 * NSC response if not supported
 */
static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
				const u8 *data, int clen)
{
	u8 buf[1];

	switch (command) {
	case CMD_CLD: {
		struct gsm_dlci *dlci = gsm->dlci[0];
		/* Modem wishes to close down */
		if (dlci) {
			dlci->dead = true;
			gsm->dead = true;
			gsm_dlci_begin_close(dlci);
		}
	}
	break;
	case CMD_TEST:
		/* Modem wishes to test, reply with the data */
		gsm_control_reply(gsm, CMD_TEST, data, clen);
		break;
	case CMD_FCON:
		/* Modem can accept data again */
		gsm->constipated = false;
		gsm_control_reply(gsm, CMD_FCON, NULL, 0);
		/* Kick the link in case it is idling */
		gsmld_write_trigger(gsm);
		break;
	case CMD_FCOFF:
		/* Modem wants us to STFU */
		gsm->constipated = true;
		gsm_control_reply(gsm, CMD_FCOFF, NULL, 0);
		break;
	case CMD_MSC:
		/* Out of band modem line change indicator for a DLCI */
		gsm_control_modem(gsm, data, clen);
		break;
	case CMD_RLS:
		/* Out of band error reception for a DLCI */
		gsm_control_rls(gsm, data, clen);
		break;
	case CMD_PSC:
		/* Modem wishes to enter power saving state */
		gsm_control_reply(gsm, CMD_PSC, NULL, 0);
		break;
		/* Optional commands */
	case CMD_PN:
		/* Modem sends a parameter negotiation command */
		gsm_control_negotiation(gsm, 1, data, clen);
		break;
		/* Optional unsupported commands */
	case CMD_RPN:	/* Remote port negotiation */
	case CMD_SNC:	/* Service negotiation command */
		gsm->unsupported++;
		fallthrough;
	default:
		/* Reply to bad commands with an NSC */
		buf[0] = command;
		gsm_control_reply(gsm, CMD_NSC, buf, 1);
		break;
	}
}
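/*
 * Example (from the default branch above): an unsupported command such as
 * CMD_RPN is answered with an NSC response whose single value octet echoes
 * the offending command EA.
 */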
/**
 * gsm_control_response - process a response to our control
 * @gsm: our GSM mux
 * @command: the command (response) EA
 * @data: data beyond the command/length EA
 * @clen: length
 *
 * Process a response to an outstanding command. We only allow a single
 * control message in flight so this is fairly easy. All the clean up
 * is done by the caller, we just update the fields, flag it as done
 * and return
 */
static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
				 const u8 *data, int clen)
{
	struct gsm_control *ctrl;
	struct gsm_dlci *dlci;
	unsigned long flags;

	spin_lock_irqsave(&gsm->control_lock, flags);

	ctrl = gsm->pending_cmd;
	dlci = gsm->dlci[0];
	command |= 1;
	/* Does the reply match our command */
	if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) {
		/* Our command was replied to, kill the retry timer */
		del_timer(&gsm->t2_timer);
		gsm->pending_cmd = NULL;
		/* Rejected by the other end */
		if (command == CMD_NSC)
			ctrl->error = -EOPNOTSUPP;
		ctrl->done = 1;
		wake_up(&gsm->event);
	/* Or did we receive the PN response to our PN command */
	} else if (command == CMD_PN) {
		gsm_control_negotiation(gsm, 0, data, clen);
	/* Or did we receive the TEST response to our TEST command */
	} else if (command == CMD_TEST && clen == 1 && *data == gsm->ka_num) {
		gsm->ka_retries = -1; /* trigger new keep-alive message */
		if (dlci && !dlci->dead)
			mod_timer(&gsm->ka_timer, jiffies + gsm->keep_alive * HZ / 100);
	}
	spin_unlock_irqrestore(&gsm->control_lock, flags);
}
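/*
 * Matching rules used above (illustrative): a CMD_NSC reply completes the
 * pending command with -EOPNOTSUPP, a CMD_PN value is fed back into
 * gsm_control_negotiation() as a response (cr = 0), and a one byte
 * CMD_TEST echo matching gsm->ka_num re-arms the keep-alive timer.
 */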
/**
 * gsm_control_keep_alive - check timeout or start keep-alive
 * @t: timer contained in our gsm object
 *
 * Called off the keep-alive timer expiry signaling that our link
 * partner is not responding anymore. Link will be closed.
 * This is also called to start up our timer.
 */
static void gsm_control_keep_alive(struct timer_list *t)
{
	struct gsm_mux *gsm = from_timer(gsm, t, ka_timer);
	unsigned long flags;

	spin_lock_irqsave(&gsm->control_lock, flags);
	if (gsm->ka_num && gsm->ka_retries == 0) {
		/* Keep-alive expired -> close the link */
		if (debug & DBG_ERRORS)
			pr_debug("%s keep-alive timed out\n", __func__);
		spin_unlock_irqrestore(&gsm->control_lock, flags);
		if (gsm->dlci[0])
			gsm_dlci_begin_close(gsm->dlci[0]);
		return;
	} else if (gsm->keep_alive && gsm->dlci[0] && !gsm->dlci[0]->dead) {
		if (gsm->ka_retries > 0) {
			/* T2 expired for keep-alive -> resend */
			gsm->ka_retries--;
		} else {
			/* Start keep-alive timer */
			gsm->ka_num++;
			if (!gsm->ka_num)
				gsm->ka_num++;
			gsm->ka_retries = (signed int)gsm->n2;
		}
		gsm_control_command(gsm, CMD_TEST, &gsm->ka_num,
				    sizeof(gsm->ka_num));
		mod_timer(&gsm->ka_timer,
			  jiffies + gsm->t2 * HZ / 100);
	}
	spin_unlock_irqrestore(&gsm->control_lock, flags);
}
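/*
 * Sequence sketch (illustrative): ka_retries == -1 requests a new probe,
 * so the next expiry bumps ka_num (skipping 0, which marks keep-alive as
 * idle), loads ka_retries from n2 and sends CMD_TEST with ka_num as
 * payload; each further T2 expiry without a matching echo decrements
 * ka_retries until DLCI 0 is closed.
 */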
/**
 * gsm_control_transmit - send control packet
 * @gsm: gsm mux
 * @ctrl: frame to send
 *
 * Send out a pending control command (called under control lock)
 */
static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl)
{
	gsm_control_command(gsm, ctrl->cmd, ctrl->data, ctrl->len);
}
/**
 * gsm_control_retransmit - retransmit a control frame
 * @t: timer contained in our gsm object
 *
 * Called off the T2 timer expiry in order to retransmit control frames
 * that have been lost in the system somewhere. The control_lock protects
 * us from colliding with another sender or a receive completion event.
 * In that situation the timer may still occur in a small window but
 * gsm->pending_cmd will be NULL and we just let the timer expire.
 */
static void gsm_control_retransmit(struct timer_list *t)
{
	struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
	struct gsm_control *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&gsm->control_lock, flags);
	ctrl = gsm->pending_cmd;
	if (ctrl) {
		if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) {
			gsm->pending_cmd = NULL;
			ctrl->error = -ETIMEDOUT;
			ctrl->done = 1;
			spin_unlock_irqrestore(&gsm->control_lock, flags);
			wake_up(&gsm->event);
			return;
		}
		gsm->cretries--;
		gsm_control_transmit(gsm, ctrl);
		mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
	}
	spin_unlock_irqrestore(&gsm->control_lock, flags);
}
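/*
 * Retry budget (illustrative): gsm_control_send() loads gsm->cretries from
 * gsm->n2 (or 0 in ADM mode), so a lost command is retransmitted on each T2
 * expiry until the budget is spent, at which point the waiter is woken with
 * -ETIMEDOUT.
 */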
/**
 * gsm_control_send - send a control frame on DLCI 0
 * @gsm: the GSM channel
 * @command: command to send including CR bit
 * @data: bytes of data (must be kmalloced)
 * @clen: length of the block to send
 *
 * Queue and dispatch a control command. Only one command can be
 * active at a time. In theory more can be outstanding, but the matching
 * gets really complicated, so for now stick to one outstanding.
 */
static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
		unsigned int command, u8 *data, int clen)
{
	struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
						GFP_ATOMIC);
	unsigned long flags;
	if (ctrl == NULL)
		return NULL;
retry:
	wait_event(gsm->event, gsm->pending_cmd == NULL);
	spin_lock_irqsave(&gsm->control_lock, flags);
	if (gsm->pending_cmd != NULL) {
		spin_unlock_irqrestore(&gsm->control_lock, flags);
		goto retry;
	}
	ctrl->cmd = command;
	ctrl->data = data;
	ctrl->len = clen;
	gsm->pending_cmd = ctrl;

	/* If DLCI0 is in ADM mode skip retries, it won't respond */
	if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
		gsm->cretries = 0;
	else
		gsm->cretries = gsm->n2;

	mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
	gsm_control_transmit(gsm, ctrl);
	spin_unlock_irqrestore(&gsm->control_lock, flags);
	return ctrl;
}
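/*
 * Note (illustrative): the wait_event()/recheck-under-lock loop above is
 * what enforces the single outstanding command rule; a caller that loses
 * the race simply sleeps again until the previous command completes.
 */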
/**
 * gsm_control_wait - wait for a control to finish
 * @gsm: GSM mux
 * @control: control we are waiting on
 *
 * Waits for the control to complete or time out. Frees any used
 * resources and returns 0 for success, or an error if the remote
 * rejected or ignored the request.
 */
static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control)
{
	int err;
	wait_event(gsm->event, control->done == 1);
	err = control->error;
	kfree(control);
	return err;
}
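/*
 * Usage sketch (illustrative, not a verbatim caller): control requests are
 * issued by pairing the two helpers above, e.g.
 *
 *	struct gsm_control *cmd;
 *	int err;
 *
 *	cmd = gsm_control_send(gsm, CMD_FCOFF, NULL, 0);
 *	if (!cmd)
 *		return -ENOMEM;
 *	err = gsm_control_wait(gsm, cmd);
 *
 * gsm_control_wait() frees the gsm_control allocation in both the success
 * and the error case.
 */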
/*
 *	DLCI level handling: Needs krefs
 */

/*
 *	State transitions and timers
 */
/**
 * gsm_dlci_close - a DLCI has closed
 * @dlci: DLCI that closed
 *
 * Perform processing when moving a DLCI into closed state. If there
 * is an attached tty this is hung up
 */
static void gsm_dlci_close(struct gsm_dlci *dlci)
{
	del_timer(&dlci->t1);
	if (debug & DBG_ERRORS)
		pr_debug("DLCI %d goes closed.\n", dlci->addr);
	dlci->state = DLCI_CLOSED;
	/* Prevent us from sending data before the link is up again */
	dlci->constipated = true;
	if (dlci->addr != 0) {
		tty_port_tty_hangup(&dlci->port, false);
		gsm_dlci_clear_queues(dlci->gsm, dlci);
		/* Ensure that gsmtty_open() can return. */
		tty_port_set_initialized(&dlci->port, false);
		wake_up_interruptible(&dlci->port.open_wait);
	} else {
		del_timer(&dlci->gsm->ka_timer);
		dlci->gsm->dead = true;
	}
	/* A DLCI 0 close is a MUX termination so we need to kick that
	   back to userspace somehow */
tty: n_gsm: fix deadlock and link starvation in outgoing data path
The current implementation queues up new control and user packets as needed
and processes this queue down to the ldisc in the same code path.
That means that the upper and the lower layer are hard coupled in the code.
Due to this deadlocks can happen as seen below while transmitting data,
especially during ldisc congestion. Furthermore, the data channels starve
the control channel on high transmission load on the ldisc.
Introduce an additional control channel data queue to prevent timeouts and
link hangups during ldisc congestion. This is being processed before the
user channel data queue in gsm_data_kick(), i.e. with the highest priority.
Put the queue to ldisc data path into a workqueue and trigger it whenever
new data has been put into the transmission queue. Change
gsm_dlci_data_sweep() accordingly to fill up the transmission queue until
TX_THRESH_HI. This solves the locking issue, keeps latency low and provides
good performance on high data load.
Note that now all packets from a DLCI are removed from the internal queue
if the associated DLCI was closed. This ensures that no data is sent by the
introduced write task to an already closed DLCI.
BUG: spinlock recursion on CPU#0, test_v24_loop/124
lock: serial8250_ports+0x3a8/0x7500, .magic: dead4ead, .owner: test_v24_loop/124, .owner_cpu: 0
CPU: 0 PID: 124 Comm: test_v24_loop Tainted: G O 5.18.0-rc2 #3
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
Call Trace:
<IRQ>
dump_stack_lvl+0x34/0x44
do_raw_spin_lock+0x76/0xa0
_raw_spin_lock_irqsave+0x72/0x80
uart_write_room+0x3b/0xc0
gsm_data_kick+0x14b/0x240 [n_gsm]
gsmld_write_wakeup+0x35/0x70 [n_gsm]
tty_wakeup+0x53/0x60
tty_port_default_wakeup+0x1b/0x30
serial8250_tx_chars+0x12f/0x220
serial8250_handle_irq.part.0+0xfe/0x150
serial8250_default_handle_irq+0x48/0x80
serial8250_interrupt+0x56/0xa0
__handle_irq_event_percpu+0x78/0x1f0
handle_irq_event+0x34/0x70
handle_fasteoi_irq+0x90/0x1e0
__common_interrupt+0x69/0x100
common_interrupt+0x48/0xc0
asm_common_interrupt+0x1e/0x40
RIP: 0010:__do_softirq+0x83/0x34e
Code: 2a 0a ff 0f b7 ed c7 44 24 10 0a 00 00 00 48 c7 c7 51 2a 64 82 e8 2d
e2 d5 ff 65 66 c7 05 83 af 1e 7e 00 00 fb b8 ff ff ff ff <49> c7 c2 40 61
80 82 0f bc c5 41 89 c4 41 83 c4 01 0f 84 e6 00 00
RSP: 0018:ffffc90000003f98 EFLAGS: 00000286
RAX: 00000000ffffffff RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000000 RSI: ffffffff82642a51 RDI: ffffffff825bb5e7
RBP: 0000000000000200 R08: 00000008de3271a8 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000030 R14: 0000000000000000 R15: 0000000000000000
? __do_softirq+0x73/0x34e
irq_exit_rcu+0xb5/0x100
common_interrupt+0xa4/0xc0
</IRQ>
<TASK>
asm_common_interrupt+0x1e/0x40
RIP: 0010:_raw_spin_unlock_irqrestore+0x2e/0x50
Code: 00 55 48 89 fd 48 83 c7 18 53 48 89 f3 48 8b 74 24 10 e8 85 28 36 ff
48 89 ef e8 cd 58 36 ff 80 e7 02 74 01 fb bf 01 00 00 00 <e8> 3d 97 33 ff
65 8b 05 96 23 2b 7e 85 c0 74 03 5b 5d c3 0f 1f 44
RSP: 0018:ffffc9000020fd08 EFLAGS: 00000202
RAX: 0000000000000000 RBX: 0000000000000246 RCX: 0000000000000000
RDX: 0000000000000004 RSI: ffffffff8257fd74 RDI: 0000000000000001
RBP: ffff8880057de3a0 R08: 00000008de233000 R09: 0000000000000000
R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
R13: 0000000000000100 R14: 0000000000000202 R15: ffff8880057df0b8
? _raw_spin_unlock_irqrestore+0x23/0x50
gsmtty_write+0x65/0x80 [n_gsm]
n_tty_write+0x33f/0x530
? swake_up_all+0xe0/0xe0
file_tty_write.constprop.0+0x1b1/0x320
? n_tty_flush_buffer+0xb0/0xb0
new_sync_write+0x10c/0x190
vfs_write+0x282/0x310
ksys_write+0x68/0xe0
do_syscall_64+0x3b/0x90
entry_SYSCALL_64_after_hwframe+0x44/0xae
RIP: 0033:0x7f3e5e35c15c
Code: 8b 7c 24 08 89 c5 e8 c5 ff ff ff 89 ef 89 44 24 08 e8 58 bc 02 00 8b
44 24 08 48 83 c4 10 5d c3 48 63 ff b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff
ff 76 10 48 8b 15 fd fc 05 00 f7 d8 64 89 02 48 83
RSP: 002b:00007ffcee77cd18 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 00007ffcee77cd70 RCX: 00007f3e5e35c15c
RDX: 0000000000000100 RSI: 00007ffcee77cd90 RDI: 0000000000000003
RBP: 0000000000000100 R08: 0000000000000000 R09: 7efefefefefefeff
R10: 00007f3e5e3bddeb R11: 0000000000000246 R12: 00007ffcee77ce8f
R13: 0000000000000001 R14: 000056214404e010 R15: 00007ffcee77cd90
</TASK>
Fixes: e1eaea46bb40 ("tty: n_gsm line discipline")
Signed-off-by: Daniel Starke <daniel.starke@siemens.com>
Link: https://lore.kernel.org/r/20220701122332.2039-1-daniel.starke@siemens.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2022-07-01 12:23:31 +00:00
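The queueing scheme described in the commit message above can be reduced to a small sketch: producers only append frames to a spinlock-protected list and schedule a work item, and that work item is the only code path that pushes data towards the lower layer. This is an illustration of the pattern, not the driver's actual code; all names below (demo_mux, demo_msg, demo_queue_tx(), demo_tx_work()) are made up for the example.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_msg {
	struct list_head list;
	unsigned int len;
	unsigned char data[];
};

struct demo_mux {
	spinlock_t tx_lock;		/* protects tx_list */
	struct list_head tx_list;	/* queued frames, all channels */
	struct work_struct tx_work;	/* single writer towards the lower layer */
};

/* Producer side: may run in any context, never calls into the lower layer. */
static void demo_queue_tx(struct demo_mux *mux, struct demo_msg *msg)
{
	unsigned long flags;

	spin_lock_irqsave(&mux->tx_lock, flags);
	list_add_tail(&msg->list, &mux->tx_list);
	spin_unlock_irqrestore(&mux->tx_lock, flags);
	schedule_work(&mux->tx_work);	/* wake the single writer */
}

/* Work item: the only place that hands data to the lower layer. */
static void demo_tx_work(struct work_struct *work)
{
	struct demo_mux *mux = container_of(work, struct demo_mux, tx_work);
	struct demo_msg *msg;
	unsigned long flags;

	spin_lock_irqsave(&mux->tx_lock, flags);
	while ((msg = list_first_entry_or_null(&mux->tx_list,
					       struct demo_msg, list))) {
		list_del(&msg->list);
		spin_unlock_irqrestore(&mux->tx_lock, flags);
		/* ... hand msg->data/msg->len to the lower layer here ... */
		kfree(msg);
		spin_lock_irqsave(&mux->tx_lock, flags);
	}
	spin_unlock_irqrestore(&mux->tx_lock, flags);
}

static void demo_mux_init(struct demo_mux *mux)
{
	spin_lock_init(&mux->tx_lock);
	INIT_LIST_HEAD(&mux->tx_list);
	INIT_WORK(&mux->tx_work, demo_tx_work);
}

In n_gsm.c itself the equivalent roles are played by the per-channel queues together with gsm_data_kick() and gsm_dlci_data_sweep(), as described in the commit message above.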
|
|
|
|
gsm_dlci_data_kick(dlci);
|
2023-02-06 11:46:06 +00:00
|
|
|
|
wake_up_all(&dlci->gsm->event);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_open - a DLCI has opened
|
|
|
|
|
* @dlci: DLCI that opened
|
|
|
|
|
*
|
|
|
|
|
* Perform processing when moving a DLCI into open state.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm_dlci_open(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
2023-02-14 12:27:37 +00:00
|
|
|
|
struct gsm_mux *gsm = dlci->gsm;
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/* Note that SABM UA .. SABM UA first UA lost can mean that we go
|
|
|
|
|
open -> open */
|
|
|
|
|
del_timer(&dlci->t1);
|
|
|
|
|
/* This will let a tty open continue */
|
|
|
|
|
dlci->state = DLCI_OPEN;
|
2022-07-01 06:16:44 +00:00
|
|
|
|
dlci->constipated = false;
|
2022-08-31 07:37:59 +00:00
|
|
|
|
if (debug & DBG_ERRORS)
|
2010-11-04 15:17:27 +00:00
|
|
|
|
pr_debug("DLCI %d goes open.\n", dlci->addr);
|
2022-04-20 10:13:44 +00:00
|
|
|
|
/* Send current modem state */
|
2023-02-14 12:27:37 +00:00
|
|
|
|
if (dlci->addr) {
|
2022-04-22 07:10:24 +00:00
|
|
|
|
gsm_modem_update(dlci, 0);
|
2023-02-14 12:27:37 +00:00
|
|
|
|
} else {
|
|
|
|
|
/* Start keep-alive control */
|
|
|
|
|
gsm->ka_num = 0;
|
|
|
|
|
gsm->ka_retries = -1;
|
|
|
|
|
mod_timer(&gsm->ka_timer,
|
|
|
|
|
jiffies + gsm->keep_alive * HZ / 100);
|
|
|
|
|
}
|
2022-07-01 12:23:31 +00:00
|
|
|
|
gsm_dlci_data_kick(dlci);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
wake_up(&dlci->gsm->event);
|
|
|
|
|
}
|
|
|
|
|
|
2022-11-03 09:17:43 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_negotiate - start parameter negotiation
|
|
|
|
|
* @dlci: DLCI to open
|
|
|
|
|
*
|
|
|
|
|
* Starts the parameter negotiation for the new DLCI. This needs to be done
|
|
|
|
|
* before the DLCI initializes the channel via SABM.
|
|
|
|
|
*/
|
|
|
|
|
static int gsm_dlci_negotiate(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm = dlci->gsm;
|
|
|
|
|
struct gsm_dlci_param_bits params;
|
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
|
|
ret = gsm_encode_params(dlci, ¶ms);
|
|
|
|
|
if (ret != 0)
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
|
|
/* We cannot asynchronously wait for the command response with
|
|
|
|
|
* gsm_command() and gsm_control_wait() at this point.
|
|
|
|
|
*/
|
|
|
|
|
ret = gsm_control_command(gsm, CMD_PN, (const u8 *)¶ms,
|
|
|
|
|
sizeof(params));
|
|
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_t1 - T1 timer expiry
|
2020-08-18 08:56:52 +00:00
|
|
|
|
* @t: timer contained in the DLCI that opened
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*
|
|
|
|
|
* The T1 timer handles retransmits of control frames (essentially of
|
|
|
|
|
* SABM and DISC). We resend the command until the retry count runs out
|
|
|
|
|
* in which case an opening port goes back to closed and a closing port
|
|
|
|
|
* is simply put into closed state (any further frames from the other
|
|
|
|
|
* end will get a DM response)
|
2018-01-03 18:18:03 +00:00
|
|
|
|
*
|
|
|
|
|
* Some control dlci can stay in ADM mode with other dlci working just
|
|
|
|
|
* fine. In that case we can just keep the control dlci open after the
|
|
|
|
|
* DLCI_OPENING retries time out.
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*/
|
|
|
|
|
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-16 21:43:17 +00:00
|
|
|
|
static void gsm_dlci_t1(struct timer_list *t)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
2017-10-16 21:43:17 +00:00
|
|
|
|
struct gsm_dlci *dlci = from_timer(dlci, t, t1);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
struct gsm_mux *gsm = dlci->gsm;
|
|
|
|
|
|
|
|
|
|
switch (dlci->state) {
|
2022-11-03 09:17:43 +00:00
|
|
|
|
case DLCI_CONFIGURE:
|
|
|
|
|
if (dlci->retries && gsm_dlci_negotiate(dlci) == 0) {
|
|
|
|
|
dlci->retries--;
|
|
|
|
|
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
|
|
|
|
|
} else {
|
2023-08-17 09:32:26 +00:00
|
|
|
|
gsm->open_error++;
|
2022-11-03 09:17:43 +00:00
|
|
|
|
gsm_dlci_begin_close(dlci); /* prevent half open link */
|
|
|
|
|
}
|
|
|
|
|
break;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
case DLCI_OPENING:
|
|
|
|
|
if (dlci->retries) {
|
2022-07-07 11:32:20 +00:00
|
|
|
|
dlci->retries--;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
|
|
|
|
|
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
|
2018-01-03 18:18:03 +00:00
|
|
|
|
} else if (!dlci->addr && gsm->control == (DM | PF)) {
|
2022-08-31 07:37:59 +00:00
|
|
|
|
if (debug & DBG_ERRORS)
|
2018-01-03 18:18:03 +00:00
|
|
|
|
pr_info("DLCI %d opening in ADM mode.\n",
|
|
|
|
|
dlci->addr);
|
2018-04-07 17:19:50 +00:00
|
|
|
|
dlci->mode = DLCI_MODE_ADM;
|
2018-01-03 18:18:03 +00:00
|
|
|
|
gsm_dlci_open(dlci);
|
|
|
|
|
} else {
|
2023-08-17 09:32:26 +00:00
|
|
|
|
gsm->open_error++;
|
2022-02-18 07:31:19 +00:00
|
|
|
|
gsm_dlci_begin_close(dlci); /* prevent half open link */
|
2018-01-03 18:18:03 +00:00
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
|
|
|
|
case DLCI_CLOSING:
|
|
|
|
|
if (dlci->retries) {
|
2022-07-07 11:32:20 +00:00
|
|
|
|
dlci->retries--;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
|
|
|
|
|
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
|
|
|
|
|
} else
|
|
|
|
|
gsm_dlci_close(dlci);
|
|
|
|
|
break;
|
2020-02-19 08:49:41 +00:00
|
|
|
|
default:
|
|
|
|
|
pr_debug("%s: unhandled state: %d\n", __func__, dlci->state);
|
|
|
|
|
break;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
}
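/*
 * Illustration only (not part of n_gsm.c): the retry pattern used by
 * gsm_dlci_t1() above, expressed with the timer_setup()/from_timer()
 * API that the treewide conversion quoted earlier introduced. A timer
 * callback retransmits a request and re-arms itself until a retry
 * budget runs out. All names below (demo_link, demo_t1_expired(),
 * demo_start_request()) are hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/timer.h>

struct demo_link {
	struct timer_list t1;	/* retransmit timer */
	int retries;		/* remaining attempts */
	unsigned int t1_cs;	/* T1 period in centiseconds */
};

static void demo_send_request(struct demo_link *link)
{
	/* ... emit the SABM/DISC-style request towards the peer ... */
}

static void demo_t1_expired(struct timer_list *t)
{
	struct demo_link *link = from_timer(link, t, t1);

	if (link->retries) {
		link->retries--;
		demo_send_request(link);	/* retransmit */
		mod_timer(&link->t1, jiffies + link->t1_cs * HZ / 100);
	} else {
		/* give up: the real driver falls back to closed/ADM here */
	}
}

static void demo_link_init(struct demo_link *link, unsigned int t1_cs)
{
	link->t1_cs = t1_cs;
	link->retries = 0;
	timer_setup(&link->t1, demo_t1_expired, 0);
}

static void demo_start_request(struct demo_link *link, int n2)
{
	link->retries = n2;
	demo_send_request(link);
	mod_timer(&link->t1, jiffies + link->t1_cs * HZ / 100);
}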
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_begin_open - start channel open procedure
|
|
|
|
|
* @dlci: DLCI to open
|
|
|
|
|
*
|
|
|
|
|
* Commence opening a DLCI from the Linux side. We issue SABM messages
|
2018-01-03 18:18:03 +00:00
|
|
|
|
* to the modem which should then reply with a UA or ADM, at which point
|
|
|
|
|
* we will move into open state. Opening is done asynchronously with retry
|
2010-03-26 11:32:54 +00:00
|
|
|
|
* running off timers and the responses.
|
2022-11-03 09:17:43 +00:00
|
|
|
|
* Parameter negotiation is performed before SABM if required.
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm_dlci_begin_open(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
2022-11-03 09:17:43 +00:00
|
|
|
|
struct gsm_mux *gsm = dlci ? dlci->gsm : NULL;
|
|
|
|
|
bool need_pn = false;
|
|
|
|
|
|
|
|
|
|
if (!gsm)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return;
|
2022-11-03 09:17:43 +00:00
|
|
|
|
|
|
|
|
|
if (dlci->addr != 0) {
|
|
|
|
|
if (gsm->adaption != 1 || gsm->adaption != dlci->adaption)
|
|
|
|
|
need_pn = true;
|
|
|
|
|
if (dlci->prio != (roundup(dlci->addr + 1, 8) - 1))
|
|
|
|
|
need_pn = true;
|
|
|
|
|
if (gsm->ftype != dlci->ftype)
|
|
|
|
|
need_pn = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch (dlci->state) {
|
|
|
|
|
case DLCI_CLOSED:
|
2023-03-15 10:53:52 +00:00
|
|
|
|
case DLCI_WAITING_CONFIG:
|
2022-11-03 09:17:43 +00:00
|
|
|
|
case DLCI_CLOSING:
|
|
|
|
|
dlci->retries = gsm->n2;
|
|
|
|
|
if (!need_pn) {
|
|
|
|
|
dlci->state = DLCI_OPENING;
|
|
|
|
|
gsm_command(gsm, dlci->addr, SABM|PF);
|
|
|
|
|
} else {
|
|
|
|
|
/* Configure DLCI before setup */
|
|
|
|
|
dlci->state = DLCI_CONFIGURE;
|
|
|
|
|
if (gsm_dlci_negotiate(dlci) != 0) {
|
|
|
|
|
gsm_dlci_close(dlci);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
2022-07-01 06:16:44 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_set_opening - change state to opening
|
|
|
|
|
* @dlci: DLCI to open
|
|
|
|
|
*
|
|
|
|
|
* Change internal state to wait for DLCI open from initiator side.
|
|
|
|
|
* We set off timers and responses upon reception of an SABM.
|
|
|
|
|
*/
|
|
|
|
|
static void gsm_dlci_set_opening(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
|
|
|
|
switch (dlci->state) {
|
|
|
|
|
case DLCI_CLOSED:
|
2023-03-15 10:53:52 +00:00
|
|
|
|
case DLCI_WAITING_CONFIG:
|
2022-07-01 06:16:44 +00:00
|
|
|
|
case DLCI_CLOSING:
|
|
|
|
|
dlci->state = DLCI_OPENING;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-15 10:53:52 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_set_wait_config - wait for channel configuration
|
|
|
|
|
* @dlci: DLCI to configure
|
|
|
|
|
*
|
|
|
|
|
* Wait for a DLCI configuration from the application.
|
|
|
|
|
*/
|
|
|
|
|
static void gsm_dlci_set_wait_config(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
|
|
|
|
switch (dlci->state) {
|
|
|
|
|
case DLCI_CLOSED:
|
|
|
|
|
case DLCI_CLOSING:
|
|
|
|
|
dlci->state = DLCI_WAITING_CONFIG;
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_begin_close - start channel close procedure
|
|
|
|
|
* @dlci: DLCI to open
|
|
|
|
|
*
|
|
|
|
|
* Commence closing a DLCI from the Linux side. We issue DISC messages
|
|
|
|
|
* to the modem which should then reply with a UA, at which point we
|
|
|
|
|
* will move into closed state. Closing is done asynchronously with retry
|
|
|
|
|
* off timers. We may also receive a DM reply from the other end which
|
|
|
|
|
* indicates the channel was already closed.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm = dlci->gsm;
|
|
|
|
|
if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING)
|
|
|
|
|
return;
|
|
|
|
|
dlci->retries = gsm->n2;
|
|
|
|
|
dlci->state = DLCI_CLOSING;
|
|
|
|
|
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
|
|
|
|
|
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
|
2023-02-06 11:46:06 +00:00
|
|
|
|
wake_up_interruptible(&gsm->event);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_data - data arrived
|
|
|
|
|
* @dlci: channel
|
|
|
|
|
* @data: block of bytes received
|
2020-08-18 08:56:52 +00:00
|
|
|
|
* @clen: length of received block
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*
|
|
|
|
|
* A UI or UIH frame has arrived which contains data for a channel
|
|
|
|
|
* other than the control channel. If the relevant virtual tty is
|
|
|
|
|
* open we shovel the bits down it, if not we drop them.
|
|
|
|
|
*/
|
|
|
|
|
|
2019-01-14 01:25:27 +00:00
|
|
|
|
static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
/* krefs .. */
|
|
|
|
|
struct tty_port *port = &dlci->port;
|
2013-01-03 14:53:06 +00:00
|
|
|
|
struct tty_struct *tty;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
unsigned int modem = 0;
|
2022-08-31 07:37:57 +00:00
|
|
|
|
int len;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2022-08-31 07:37:59 +00:00
|
|
|
|
if (debug & DBG_TTY)
|
2022-08-31 07:37:57 +00:00
|
|
|
|
pr_debug("%d bytes for tty\n", clen);
|
2013-01-03 14:53:06 +00:00
|
|
|
|
switch (dlci->adaption) {
|
|
|
|
|
/* Unsupported types */
|
2019-02-25 17:28:14 +00:00
|
|
|
|
case 4: /* Packetised interruptible data */
|
2013-01-03 14:53:06 +00:00
|
|
|
|
break;
|
2019-02-25 17:28:14 +00:00
|
|
|
|
case 3: /* Packetised uninterruptible voice/data */
|
2013-01-03 14:53:06 +00:00
|
|
|
|
break;
|
2019-02-25 17:28:14 +00:00
|
|
|
|
case 2: /* Asynchronous serial with line state in each frame */
|
2022-08-31 07:37:57 +00:00
|
|
|
|
len = gsm_read_ea_val(&modem, data, clen);
|
|
|
|
|
if (len < 1)
|
|
|
|
|
return;
|
2013-01-03 14:53:06 +00:00
|
|
|
|
tty = tty_port_tty_get(port);
|
|
|
|
|
if (tty) {
|
2022-08-31 07:37:57 +00:00
|
|
|
|
gsm_process_modem(tty, dlci, modem, len);
|
2022-04-14 09:42:19 +00:00
|
|
|
|
tty_wakeup(tty);
|
2013-01-03 14:53:06 +00:00
|
|
|
|
tty_kref_put(tty);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
2022-08-31 07:37:57 +00:00
|
|
|
|
/* Skip processed modem data */
|
|
|
|
|
data += len;
|
|
|
|
|
clen -= len;
|
2020-08-23 22:36:59 +00:00
|
|
|
|
fallthrough;
|
2019-02-25 17:28:14 +00:00
|
|
|
|
case 1: /* Line state will go via DLCI 0 controls only */
|
2013-01-03 14:53:06 +00:00
|
|
|
|
default:
|
2022-08-31 07:37:57 +00:00
|
|
|
|
tty_insert_flip_string(port, data, clen);
|
2013-01-03 14:53:06 +00:00
|
|
|
|
tty_flip_buffer_push(port);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
2021-05-20 12:19:05 +00:00
|
|
|
|
* gsm_dlci_command - data arrived on control channel
|
2010-03-26 11:32:54 +00:00
|
|
|
|
* @dlci: channel
|
|
|
|
|
* @data: block of bytes received
|
|
|
|
|
* @len: length of received block
|
|
|
|
|
*
|
|
|
|
|
* A UI or UIH frame has arrived which contains data for DLCI 0, the
|
|
|
|
|
* control channel. This should contain a command EA followed by
|
|
|
|
|
* control data bytes. The command EA contains a command/response bit
|
|
|
|
|
* and we divide up the work accordingly.
|
|
|
|
|
*/
|
|
|
|
|
|
2019-01-14 01:25:27 +00:00
|
|
|
|
static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
/* See what command is involved */
|
|
|
|
|
unsigned int command = 0;
|
2022-08-31 07:37:57 +00:00
|
|
|
|
unsigned int clen = 0;
|
|
|
|
|
unsigned int dlen;
|
|
|
|
|
|
|
|
|
|
/* read the command */
|
|
|
|
|
dlen = gsm_read_ea_val(&command, data, len);
|
|
|
|
|
len -= dlen;
|
|
|
|
|
data += dlen;
|
|
|
|
|
|
|
|
|
|
/* read any control data */
|
|
|
|
|
dlen = gsm_read_ea_val(&clen, data, len);
|
|
|
|
|
len -= dlen;
|
|
|
|
|
data += dlen;
|
|
|
|
|
|
|
|
|
|
/* Malformed command? */
|
2023-08-17 09:32:27 +00:00
|
|
|
|
if (clen > len) {
|
|
|
|
|
dlci->gsm->malformed++;
|
2022-08-31 07:37:57 +00:00
|
|
|
|
return;
|
2023-08-17 09:32:27 +00:00
|
|
|
|
}
|
2022-08-31 07:37:57 +00:00
|
|
|
|
|
|
|
|
|
if (command & 1)
|
|
|
|
|
gsm_control_message(dlci->gsm, command, data, clen);
|
|
|
|
|
else
|
|
|
|
|
gsm_control_response(dlci->gsm, command, data, clen);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
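/*
 * Illustration only (not part of n_gsm.c): the GSM 07.10 "EA"
 * (extension) encoding that gsm_dlci_command() above relies on when it
 * reads the command and length fields. Bit 1 of each octet is the EA
 * flag (1 = last octet) and the remaining seven bits carry the value,
 * most significant group first. demo_read_ea_val() is a simplified
 * stand-in for the gsm_read_ea_val() helper used in this file; the name
 * is hypothetical.
 */
#include <linux/types.h>

static unsigned int demo_read_ea_val(unsigned int *val, const u8 *data,
				     int dlen)
{
	unsigned int len = 0;

	*val = 0;
	while (dlen-- > 0) {
		u8 c = *data++;

		len++;
		*val = (*val << 7) | (c >> 1);	/* accumulate 7 value bits */
		if (c & 0x01)			/* EA bit set: last octet */
			break;
	}
	return len;			/* number of octets consumed */
}

/* Example: a single octet 0x07 has the EA bit set and decodes to 0x07 >> 1 == 3. */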
|
|
|
|
|
|
2022-07-01 06:16:47 +00:00
|
|
|
|
/**
|
2022-10-08 11:02:21 +00:00
|
|
|
|
* gsm_kick_timer - transmit if possible
|
|
|
|
|
* @t: timer contained in our gsm object
|
2022-07-01 06:16:47 +00:00
|
|
|
|
*
|
|
|
|
|
* Transmit data from DLCIs if the queue is empty. We can't rely on
|
|
|
|
|
* a tty wakeup except when we filled the pipe so we need to fire off
|
|
|
|
|
* new data ourselves in other cases.
|
|
|
|
|
*/
|
2022-10-08 11:02:21 +00:00
|
|
|
|
static void gsm_kick_timer(struct timer_list *t)
|
2022-07-01 06:16:47 +00:00
|
|
|
|
{
|
2022-10-08 11:02:21 +00:00
|
|
|
|
struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
|
2022-10-08 11:02:20 +00:00
|
|
|
|
unsigned long flags;
|
2022-07-01 06:16:47 +00:00
|
|
|
|
int sent = 0;
|
|
|
|
|
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_lock_irqsave(&gsm->tx_lock, flags);
|
2022-07-01 06:16:47 +00:00
|
|
|
|
/* If we have nothing running then we need to fire up */
|
|
|
|
|
if (gsm->tx_bytes < TX_THRESH_LO)
|
|
|
|
|
sent = gsm_dlci_data_sweep(gsm);
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_unlock_irqrestore(&gsm->tx_lock, flags);
|
2022-07-01 06:16:47 +00:00
|
|
|
|
|
2022-08-31 07:37:59 +00:00
|
|
|
|
if (sent && debug & DBG_DATA)
|
2022-07-01 06:16:47 +00:00
|
|
|
|
pr_info("%s TX queue stalled\n", __func__);
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-15 10:53:52 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_copy_config_values - copy DLCI configuration
|
|
|
|
|
* @dlci: source DLCI
|
|
|
|
|
* @dc: configuration structure to fill
|
|
|
|
|
*/
|
|
|
|
|
static void gsm_dlci_copy_config_values(struct gsm_dlci *dlci, struct gsm_dlci_config *dc)
|
|
|
|
|
{
|
|
|
|
|
memset(dc, 0, sizeof(*dc));
|
|
|
|
|
dc->channel = (u32)dlci->addr;
|
|
|
|
|
dc->adaption = (u32)dlci->adaption;
|
|
|
|
|
dc->mtu = (u32)dlci->mtu;
|
|
|
|
|
dc->priority = (u32)dlci->prio;
|
|
|
|
|
if (dlci->ftype == UIH)
|
|
|
|
|
dc->i = 1;
|
|
|
|
|
else
|
|
|
|
|
dc->i = 2;
|
|
|
|
|
dc->k = (u32)dlci->k;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_config - configure DLCI from configuration
|
|
|
|
|
* @dlci: DLCI to configure
|
|
|
|
|
* @dc: DLCI configuration
|
|
|
|
|
* @open: open DLCI after configuration?
|
|
|
|
|
*/
|
|
|
|
|
static int gsm_dlci_config(struct gsm_dlci *dlci, struct gsm_dlci_config *dc, int open)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm;
|
|
|
|
|
bool need_restart = false;
|
|
|
|
|
bool need_open = false;
|
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Check that userspace doesn't put stuff in here to prevent breakages
|
|
|
|
|
* in the future.
|
|
|
|
|
*/
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(dc->reserved); i++)
|
|
|
|
|
if (dc->reserved[i])
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
if (!dlci)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
gsm = dlci->gsm;
|
|
|
|
|
|
|
|
|
|
/* Stuff we don't support yet - I frame transport */
|
|
|
|
|
if (dc->adaption != 1 && dc->adaption != 2)
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
if (dc->mtu > MAX_MTU || dc->mtu < MIN_MTU || dc->mtu > gsm->mru)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
if (dc->priority >= 64)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
if (dc->i == 0 || dc->i > 2) /* UIH and UI only */
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
if (dc->k > 7)
|
|
|
|
|
return -EINVAL;
|
2023-08-17 09:32:23 +00:00
|
|
|
|
if (dc->flags & ~GSM_FL_RESTART) /* allow future extensions */
|
|
|
|
|
return -EINVAL;
|
2023-03-15 10:53:52 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* See what is needed for reconfiguration
|
|
|
|
|
*/
|
|
|
|
|
/* Framing fields */
|
|
|
|
|
if (dc->adaption != dlci->adaption)
|
|
|
|
|
need_restart = true;
|
|
|
|
|
if (dc->mtu != dlci->mtu)
|
|
|
|
|
need_restart = true;
|
|
|
|
|
if (dc->i != dlci->ftype)
|
|
|
|
|
need_restart = true;
|
|
|
|
|
/* Requires care */
|
|
|
|
|
if (dc->priority != dlci->prio)
|
|
|
|
|
need_restart = true;
|
2023-08-17 09:32:23 +00:00
|
|
|
|
if (dc->flags & GSM_FL_RESTART)
|
|
|
|
|
need_restart = true;
|
2023-03-15 10:53:52 +00:00
|
|
|
|
|
|
|
|
|
if ((open && gsm->wait_config) || need_restart)
|
|
|
|
|
need_open = true;
|
|
|
|
|
if (dlci->state == DLCI_WAITING_CONFIG) {
|
|
|
|
|
need_restart = false;
|
|
|
|
|
need_open = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Close down what is needed, restart and initiate the new
|
|
|
|
|
* configuration.
|
|
|
|
|
*/
|
|
|
|
|
if (need_restart) {
|
|
|
|
|
gsm_dlci_begin_close(dlci);
|
|
|
|
|
wait_event_interruptible(gsm->event, dlci->state == DLCI_CLOSED);
|
|
|
|
|
if (signal_pending(current))
|
|
|
|
|
return -EINTR;
|
|
|
|
|
}
|
|
|
|
|
/*
|
|
|
|
|
* Setup the new configuration values
|
|
|
|
|
*/
|
|
|
|
|
dlci->adaption = (int)dc->adaption;
|
|
|
|
|
|
|
|
|
|
if (dc->mtu)
|
|
|
|
|
dlci->mtu = (unsigned int)dc->mtu;
|
|
|
|
|
else
|
|
|
|
|
dlci->mtu = gsm->mtu;
|
|
|
|
|
|
|
|
|
|
if (dc->priority)
|
|
|
|
|
dlci->prio = (u8)dc->priority;
|
|
|
|
|
else
|
|
|
|
|
dlci->prio = roundup(dlci->addr + 1, 8) - 1;
|
|
|
|
|
|
|
|
|
|
if (dc->i == 1)
|
|
|
|
|
dlci->ftype = UIH;
|
|
|
|
|
else if (dc->i == 2)
|
|
|
|
|
dlci->ftype = UI;
|
|
|
|
|
|
|
|
|
|
if (dc->k)
|
|
|
|
|
dlci->k = (u8)dc->k;
|
|
|
|
|
else
|
|
|
|
|
dlci->k = gsm->k;
|
|
|
|
|
|
|
|
|
|
if (need_open) {
|
|
|
|
|
if (gsm->initiator)
|
|
|
|
|
gsm_dlci_begin_open(dlci);
|
|
|
|
|
else
|
|
|
|
|
gsm_dlci_set_opening(dlci);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
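/*
 * Illustration only (not part of n_gsm.c): how user space could feed the
 * per-DLCI configuration that gsm_dlci_config() above validates. This
 * sketch assumes struct gsm_dlci_config and the GSMIOC_SETCONF_DLCI
 * ioctl exported through <linux/gsmmux.h>; the device path, channel
 * number and parameter values are examples only, and the addressing
 * rules (base mux tty vs. /dev/gsmtty* node) should be checked against
 * the n_gsm documentation of the running kernel.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gsmmux.h>

static int demo_configure_dlci(const char *tty_path, unsigned int channel)
{
	struct gsm_dlci_config dc;
	int fd = open(tty_path, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;

	memset(&dc, 0, sizeof(dc));	/* reserved[] must stay zero */
	dc.channel = channel;		/* DLCI address to configure */
	dc.adaption = 1;		/* plain octet stream convergence layer */
	dc.mtu = 127;			/* within MIN_MTU..MAX_MTU and the mux MRU */
	dc.priority = 0;		/* 0 = keep the address-based default */
	dc.i = 1;			/* 1 = UIH frames, 2 = UI frames */
	dc.k = 0;			/* 0 = inherit the mux window size */

	if (ioctl(fd, GSMIOC_SETCONF_DLCI, &dc) < 0) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}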
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/*
|
|
|
|
|
* Allocate/Free DLCI channels
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_alloc - allocate a DLCI
|
|
|
|
|
* @gsm: GSM mux
|
|
|
|
|
* @addr: address of the DLCI
|
|
|
|
|
*
|
|
|
|
|
* Allocate and install a new DLCI object into the GSM mux.
|
|
|
|
|
*
|
|
|
|
|
* FIXME: review locking races
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC);
|
|
|
|
|
if (dlci == NULL)
|
|
|
|
|
return NULL;
|
|
|
|
|
spin_lock_init(&dlci->lock);
|
2011-06-16 21:20:12 +00:00
|
|
|
|
mutex_init(&dlci->mutex);
|
2022-05-04 08:17:33 +00:00
|
|
|
|
if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) {
|
2010-03-26 11:32:54 +00:00
|
|
|
|
kfree(dlci);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
skb_queue_head_init(&dlci->skb_list);
|
2017-10-16 21:43:17 +00:00
|
|
|
|
timer_setup(&dlci->t1, gsm_dlci_t1, 0);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
tty_port_init(&dlci->port);
|
|
|
|
|
dlci->port.ops = &gsm_port_ops;
|
|
|
|
|
dlci->gsm = gsm;
|
|
|
|
|
dlci->addr = addr;
|
|
|
|
|
dlci->adaption = gsm->adaption;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
dlci->mtu = gsm->mtu;
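	/* Default DLCI priorities follow the GSM 07.10 convention: the control
	 * channel (DLCI 0) gets the highest priority and the remaining DLCIs
	 * are grouped in blocks of eight sharing one priority value.
	 */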
|
|
|
|
|
if (addr == 0)
|
|
|
|
|
dlci->prio = 0;
|
|
|
|
|
else
|
|
|
|
|
dlci->prio = roundup(addr + 1, 8) - 1;
|
|
|
|
|
dlci->ftype = gsm->ftype;
|
|
|
|
|
dlci->k = gsm->k;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
dlci->state = DLCI_CLOSED;
|
2022-07-01 06:16:44 +00:00
|
|
|
|
if (addr) {
|
2010-03-26 11:32:54 +00:00
|
|
|
|
dlci->data = gsm_dlci_data;
|
2022-07-01 06:16:44 +00:00
|
|
|
|
/* Prevent us from sending data before the link is up */
|
|
|
|
|
dlci->constipated = true;
|
|
|
|
|
} else {
|
2010-03-26 11:32:54 +00:00
|
|
|
|
dlci->data = gsm_dlci_command;
|
2022-07-01 06:16:44 +00:00
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->dlci[addr] = dlci;
|
|
|
|
|
return dlci;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
2011-06-16 21:20:13 +00:00
|
|
|
|
* gsm_dlci_free - free DLCI
|
2020-08-18 08:56:52 +00:00
|
|
|
|
* @port: tty port for DLCI to free
|
2011-06-16 21:20:13 +00:00
|
|
|
|
*
|
|
|
|
|
* Free up a DLCI.
|
|
|
|
|
*
|
|
|
|
|
* Can sleep.
|
|
|
|
|
*/
|
2012-11-15 08:49:53 +00:00
|
|
|
|
static void gsm_dlci_free(struct tty_port *port)
|
2011-06-16 21:20:13 +00:00
|
|
|
|
{
|
2012-11-15 08:49:53 +00:00
|
|
|
|
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
|
2022-12-20 18:45:19 +00:00
|
|
|
|
timer_shutdown_sync(&dlci->t1);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
dlci->gsm->dlci[dlci->addr] = NULL;
|
2020-02-19 08:49:40 +00:00
|
|
|
|
kfifo_free(&dlci->fifo);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
while ((dlci->skb = skb_dequeue(&dlci->skb_list)))
|
2012-08-13 12:45:15 +00:00
|
|
|
|
dev_kfree_skb(dlci->skb);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
kfree(dlci);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void dlci_get(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
2012-11-15 08:49:53 +00:00
|
|
|
|
tty_port_get(&dlci->port);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void dlci_put(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
2012-11-15 08:49:53 +00:00
|
|
|
|
tty_port_put(&dlci->port);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-30 10:44:50 +00:00
|
|
|
|
static void gsm_destroy_network(struct gsm_dlci *dlci);
|
|
|
|
|
|
2011-06-16 21:20:13 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_dlci_release - release DLCI
|
2010-03-26 11:32:54 +00:00
|
|
|
|
* @dlci: DLCI to destroy
|
|
|
|
|
*
|
2011-06-16 21:20:13 +00:00
|
|
|
|
* Release a DLCI. Actual free is deferred until either
|
|
|
|
|
* mux is closed or tty is closed - whichever is last.
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*
|
|
|
|
|
* Can sleep.
|
|
|
|
|
*/
|
2011-06-16 21:20:13 +00:00
|
|
|
|
static void gsm_dlci_release(struct gsm_dlci *dlci)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
struct tty_struct *tty = tty_port_tty_get(&dlci->port);
|
|
|
|
|
if (tty) {
|
2013-01-30 10:44:50 +00:00
|
|
|
|
mutex_lock(&dlci->mutex);
|
|
|
|
|
gsm_destroy_network(dlci);
|
|
|
|
|
mutex_unlock(&dlci->mutex);
|
|
|
|
|
|
2022-02-18 07:31:20 +00:00
|
|
|
|
/* We cannot use tty_hangup() because in tty_kref_put() the tty
|
|
|
|
|
* driver assumes that the hangup queue is free and reuses it to
|
|
|
|
|
* queue release_one_tty() -> NULL pointer panic in
|
|
|
|
|
* process_one_work().
|
|
|
|
|
*/
|
|
|
|
|
tty_vhangup(tty);
|
2013-12-18 05:30:11 +00:00
|
|
|
|
|
2013-01-30 10:44:50 +00:00
|
|
|
|
tty_port_tty_set(&dlci->port, NULL);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
tty_kref_put(tty);
|
|
|
|
|
}
|
2013-01-30 10:44:50 +00:00
|
|
|
|
dlci->state = DLCI_CLOSED;
|
2011-06-16 21:20:13 +00:00
|
|
|
|
dlci_put(dlci);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* LAPBish link layer logic
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_queue - a GSM frame is ready to process
|
|
|
|
|
* @gsm: pointer to our gsm mux
|
|
|
|
|
*
|
|
|
|
|
* At this point in time a frame has arrived and been demangled from
|
|
|
|
|
* the line encoding. All the differences between the encodings have
|
|
|
|
|
* been handled below us and the frame is unpacked into the structures.
|
|
|
|
|
* The fcs holds the header FCS but any data FCS must be added here.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm_queue(struct gsm_mux *gsm)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_dlci *dlci;
|
|
|
|
|
u8 cr;
|
|
|
|
|
int address;
|
|
|
|
|
|
|
|
|
|
if (gsm->fcs != GOOD_FCS) {
|
|
|
|
|
gsm->bad_fcs++;
|
2022-08-31 07:37:59 +00:00
|
|
|
|
if (debug & DBG_DATA)
|
2010-11-04 15:17:27 +00:00
|
|
|
|
pr_debug("BAD FCS %02x\n", gsm->fcs);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return;
|
|
|
|
|
}
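	/* gsm_read_ea() already stripped the EA bits while the address field
	 * was collected, so gsm->address holds the C/R flag in bit 0 and the
	 * DLCI number in the bits above it; split the two apart here.
	 */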
|
|
|
|
|
address = gsm->address >> 1;
|
|
|
|
|
if (address >= NUM_DLCI)
|
|
|
|
|
goto invalid;
|
|
|
|
|
|
|
|
|
|
cr = gsm->address & 1; /* C/R bit */
|
2022-02-18 07:31:18 +00:00
|
|
|
|
cr ^= gsm->initiator ? 0 : 1; /* Flip so 1 always means command */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
|
|
|
|
gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len);
|
|
|
|
|
|
|
|
|
|
dlci = gsm->dlci[address];
|
|
|
|
|
|
|
|
|
|
switch (gsm->control) {
|
|
|
|
|
case SABM|PF:
|
2023-08-17 09:32:26 +00:00
|
|
|
|
if (cr == 1) {
|
|
|
|
|
gsm->open_error++;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
goto invalid;
|
2023-08-17 09:32:26 +00:00
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (dlci == NULL)
|
|
|
|
|
dlci = gsm_dlci_alloc(gsm, address);
|
2023-08-17 09:32:26 +00:00
|
|
|
|
if (dlci == NULL) {
|
|
|
|
|
gsm->open_error++;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return;
|
2023-08-17 09:32:26 +00:00
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (dlci->dead)
|
2021-08-20 12:17:47 +00:00
|
|
|
|
gsm_response(gsm, address, DM|PF);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
else {
|
2021-08-20 12:17:47 +00:00
|
|
|
|
gsm_response(gsm, address, UA|PF);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm_dlci_open(dlci);
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case DISC|PF:
|
2021-08-20 12:17:46 +00:00
|
|
|
|
if (cr == 1)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
goto invalid;
|
|
|
|
|
if (dlci == NULL || dlci->state == DLCI_CLOSED) {
|
2021-08-20 12:17:47 +00:00
|
|
|
|
gsm_response(gsm, address, DM|PF);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
/* Real close complete */
|
2022-04-22 07:10:23 +00:00
|
|
|
|
gsm_response(gsm, address, UA|PF);
|
|
|
|
|
gsm_dlci_close(dlci);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
|
|
|
|
case UA|PF:
|
|
|
|
|
if (cr == 0 || dlci == NULL)
|
|
|
|
|
break;
|
|
|
|
|
switch (dlci->state) {
|
|
|
|
|
case DLCI_CLOSING:
|
|
|
|
|
gsm_dlci_close(dlci);
|
|
|
|
|
break;
|
|
|
|
|
case DLCI_OPENING:
|
|
|
|
|
gsm_dlci_open(dlci);
|
|
|
|
|
break;
|
2020-02-19 08:49:41 +00:00
|
|
|
|
default:
|
|
|
|
|
pr_debug("%s: unhandled state: %d\n", __func__,
|
|
|
|
|
dlci->state);
|
|
|
|
|
break;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case DM: /* DM can be valid unsolicited */
|
|
|
|
|
case DM|PF:
|
|
|
|
|
if (cr)
|
|
|
|
|
goto invalid;
|
|
|
|
|
if (dlci == NULL)
|
|
|
|
|
return;
|
|
|
|
|
gsm_dlci_close(dlci);
|
|
|
|
|
break;
|
|
|
|
|
case UI:
|
|
|
|
|
case UI|PF:
|
|
|
|
|
case UIH:
|
|
|
|
|
case UIH|PF:
|
|
|
|
|
if (dlci == NULL || dlci->state != DLCI_OPEN) {
|
2022-07-07 11:32:21 +00:00
|
|
|
|
gsm_response(gsm, address, DM|PF);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
dlci->data(dlci, gsm->buf, gsm->len);
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
goto invalid;
|
|
|
|
|
}
|
|
|
|
|
return;
|
|
|
|
|
invalid:
|
|
|
|
|
gsm->malformed++;
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2024-04-24 05:48:42 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm0_receive_state_check_and_fix - check and correct receive state
|
|
|
|
|
* @gsm: gsm data for this ldisc instance
|
|
|
|
|
*
|
|
|
|
|
* Ensures that the current receive state is valid for basic option mode.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm0_receive_state_check_and_fix(struct gsm_mux *gsm)
|
|
|
|
|
{
|
|
|
|
|
switch (gsm->state) {
|
|
|
|
|
case GSM_SEARCH:
|
|
|
|
|
case GSM0_ADDRESS:
|
|
|
|
|
case GSM0_CONTROL:
|
|
|
|
|
case GSM0_LEN0:
|
|
|
|
|
case GSM0_LEN1:
|
|
|
|
|
case GSM0_DATA:
|
|
|
|
|
case GSM0_FCS:
|
|
|
|
|
case GSM0_SSOF:
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
gsm->state = GSM_SEARCH;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm0_receive - perform processing for non-transparency
|
|
|
|
|
* @gsm: gsm data for this ldisc instance
|
|
|
|
|
* @c: character
|
|
|
|
|
*
|
|
|
|
|
* Receive bytes in gsm mode 0
|
|
|
|
|
*/
|
|
|
|
|
|
2023-12-06 07:37:04 +00:00
|
|
|
|
static void gsm0_receive(struct gsm_mux *gsm, u8 c)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
2010-11-04 15:17:03 +00:00
|
|
|
|
unsigned int len;
|
|
|
|
|
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm0_receive_state_check_and_fix(gsm);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
switch (gsm->state) {
|
|
|
|
|
case GSM_SEARCH: /* SOF marker */
|
|
|
|
|
if (c == GSM0_SOF) {
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_ADDRESS;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->address = 0;
|
|
|
|
|
gsm->len = 0;
|
|
|
|
|
gsm->fcs = INIT_FCS;
|
|
|
|
|
}
|
2010-11-04 15:17:03 +00:00
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM0_ADDRESS: /* Address EA */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
|
|
|
|
|
if (gsm_read_ea(&gsm->address, c))
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_CONTROL;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM0_CONTROL: /* Control Byte */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
|
|
|
|
|
gsm->control = c;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_LEN0;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM0_LEN0: /* Length EA */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
|
|
|
|
|
if (gsm_read_ea(&gsm->len, c)) {
|
|
|
|
|
if (gsm->len > gsm->mru) {
|
|
|
|
|
gsm->bad_size++;
|
|
|
|
|
gsm->state = GSM_SEARCH;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
gsm->count = 0;
|
2010-11-04 15:17:03 +00:00
|
|
|
|
if (!gsm->len)
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_FCS;
|
2010-11-04 15:17:03 +00:00
|
|
|
|
else
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_DATA;
|
2010-11-04 15:17:03 +00:00
|
|
|
|
break;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_LEN1;
|
2010-11-04 15:17:03 +00:00
|
|
|
|
break;
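	/* A clear EA bit in the first length octet means a second octet
	 * follows, carrying bits 7..14 of the frame length.
	 */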
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM0_LEN1:
|
2010-11-04 15:17:03 +00:00
|
|
|
|
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
|
|
|
|
|
len = c;
|
|
|
|
|
gsm->len |= len << 7;
|
|
|
|
|
if (gsm->len > gsm->mru) {
|
|
|
|
|
gsm->bad_size++;
|
|
|
|
|
gsm->state = GSM_SEARCH;
|
|
|
|
|
break;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
2010-11-04 15:17:03 +00:00
|
|
|
|
gsm->count = 0;
|
|
|
|
|
if (!gsm->len)
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_FCS;
|
2010-11-04 15:17:03 +00:00
|
|
|
|
else
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_DATA;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM0_DATA: /* Data */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->buf[gsm->count++] = c;
|
2024-04-24 05:48:41 +00:00
|
|
|
|
if (gsm->count >= MAX_MRU) {
|
|
|
|
|
gsm->bad_size++;
|
|
|
|
|
gsm->state = GSM_SEARCH;
|
|
|
|
|
} else if (gsm->count >= gsm->len) {
|
2022-04-14 09:42:11 +00:00
|
|
|
|
/* Calculate final FCS for UI frames over all data */
|
|
|
|
|
if ((gsm->control & ~PF) != UIH) {
|
|
|
|
|
gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
|
|
|
|
|
gsm->count);
|
|
|
|
|
}
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_FCS;
|
2022-04-14 09:42:11 +00:00
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM0_FCS: /* FCS follows the packet */
|
2022-04-14 09:42:11 +00:00
|
|
|
|
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM0_SSOF;
|
2010-11-04 15:17:03 +00:00
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM0_SSOF:
|
2022-04-14 09:42:11 +00:00
|
|
|
|
gsm->state = GSM_SEARCH;
|
|
|
|
|
if (c == GSM0_SOF)
|
|
|
|
|
gsm_queue(gsm);
|
|
|
|
|
else
|
|
|
|
|
gsm->bad_size++;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
2020-02-19 08:49:43 +00:00
|
|
|
|
default:
|
|
|
|
|
pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
|
|
|
|
|
break;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2024-04-24 05:48:42 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm1_receive_state_check_and_fix - check and correct receive state
|
|
|
|
|
* @gsm: gsm data for this ldisc instance
|
|
|
|
|
*
|
|
|
|
|
* Ensures that the current receive state is valid for advanced option mode.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsm1_receive_state_check_and_fix(struct gsm_mux *gsm)
|
|
|
|
|
{
|
|
|
|
|
switch (gsm->state) {
|
|
|
|
|
case GSM_SEARCH:
|
|
|
|
|
case GSM1_START:
|
|
|
|
|
case GSM1_ADDRESS:
|
|
|
|
|
case GSM1_CONTROL:
|
|
|
|
|
case GSM1_DATA:
|
|
|
|
|
case GSM1_OVERRUN:
|
|
|
|
|
break;
|
|
|
|
|
default:
|
|
|
|
|
gsm->state = GSM_SEARCH;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
|
2010-11-04 15:17:03 +00:00
|
|
|
|
* gsm1_receive - perform processing for non-transparency
|
2010-03-26 11:32:54 +00:00
|
|
|
|
* @gsm: gsm data for this ldisc instance
|
|
|
|
|
* @c: character
|
|
|
|
|
*
|
|
|
|
|
* Receive bytes in mode 1 (Advanced option)
|
|
|
|
|
*/
|
|
|
|
|
|
2023-12-06 07:37:04 +00:00
|
|
|
|
static void gsm1_receive(struct gsm_mux *gsm, u8 c)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm1_receive_state_check_and_fix(gsm);
|
2022-04-22 07:10:25 +00:00
|
|
|
|
/* handle XON/XOFF */
|
|
|
|
|
if ((c & ISO_IEC_646_MASK) == XON) {
|
|
|
|
|
gsm->constipated = true;
|
|
|
|
|
return;
|
|
|
|
|
} else if ((c & ISO_IEC_646_MASK) == XOFF) {
|
|
|
|
|
gsm->constipated = false;
|
|
|
|
|
/* Kick the link in case it is idling */
|
2022-07-01 12:23:31 +00:00
|
|
|
|
gsmld_write_trigger(gsm);
|
2022-04-22 07:10:25 +00:00
|
|
|
|
return;
|
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (c == GSM1_SOF) {
|
2022-04-14 09:42:11 +00:00
|
|
|
|
/* EOF is only valid in frame if we have got to the data state */
|
2024-04-24 05:48:42 +00:00
|
|
|
|
if (gsm->state == GSM1_DATA) {
|
2022-04-14 09:42:11 +00:00
|
|
|
|
if (gsm->count < 1) {
|
|
|
|
|
/* Missing FCS */
|
|
|
|
|
gsm->malformed++;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM1_START;
|
2022-04-14 09:42:11 +00:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
/* Remove the FCS from data */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->count--;
|
2022-04-14 09:42:11 +00:00
|
|
|
|
if ((gsm->control & ~PF) != UIH) {
|
|
|
|
|
/* Calculate final FCS for UI frames over all
|
|
|
|
|
* data but FCS
|
|
|
|
|
*/
|
|
|
|
|
gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
|
|
|
|
|
gsm->count);
|
|
|
|
|
}
|
|
|
|
|
/* Add the FCS itself to test against GOOD_FCS */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
|
|
|
|
|
gsm->len = gsm->count;
|
|
|
|
|
gsm_queue(gsm);
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM1_START;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
/* Any partial frame was a runt so go back to start */
|
2024-04-24 05:48:42 +00:00
|
|
|
|
if (gsm->state != GSM1_START) {
|
2022-04-14 09:42:12 +00:00
|
|
|
|
if (gsm->state != GSM_SEARCH)
|
|
|
|
|
gsm->malformed++;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM1_START;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
/* A SOF in GSM1_START means we are still reading idling or
|
|
|
|
|
framing bytes */
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (c == GSM1_ESCAPE) {
|
2020-02-19 08:49:49 +00:00
|
|
|
|
gsm->escape = true;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Only an unescaped SOF gets us out of GSM search */
|
|
|
|
|
if (gsm->state == GSM_SEARCH)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
if (gsm->escape) {
|
|
|
|
|
c ^= GSM1_ESCAPE_BITS;
|
2020-02-19 08:49:49 +00:00
|
|
|
|
gsm->escape = false;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
switch (gsm->state) {
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM1_START: /* First byte after SOF */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->address = 0;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM1_ADDRESS;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->fcs = INIT_FCS;
|
2020-08-23 22:36:59 +00:00
|
|
|
|
fallthrough;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM1_ADDRESS: /* Address continuation */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
|
|
|
|
|
if (gsm_read_ea(&gsm->address, c))
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM1_CONTROL;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM1_CONTROL: /* Control Byte */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
|
|
|
|
|
gsm->control = c;
|
|
|
|
|
gsm->count = 0;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM1_DATA;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM1_DATA: /* Data */
|
2024-04-24 05:48:41 +00:00
|
|
|
|
if (gsm->count > gsm->mru || gsm->count > MAX_MRU) { /* Allow one for the FCS */
|
2024-04-24 05:48:42 +00:00
|
|
|
|
gsm->state = GSM1_OVERRUN;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->bad_size++;
|
|
|
|
|
} else
|
|
|
|
|
gsm->buf[gsm->count++] = c;
|
|
|
|
|
break;
|
2024-04-24 05:48:42 +00:00
|
|
|
|
case GSM1_OVERRUN: /* Over-long - eg a dropped SOF */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
2020-02-19 08:49:43 +00:00
|
|
|
|
default:
|
|
|
|
|
pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
|
|
|
|
|
break;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_error - handle tty error
|
|
|
|
|
* @gsm: ldisc data
|
|
|
|
|
*
|
|
|
|
|
* Handle an error in the receipt of data for a frame. Currently we just
|
|
|
|
|
* go back to hunting for a SOF.
|
|
|
|
|
*
|
|
|
|
|
* FIXME: better diagnostics ?
|
|
|
|
|
*/
|
|
|
|
|
|
2021-11-18 07:17:16 +00:00
|
|
|
|
static void gsm_error(struct gsm_mux *gsm)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
gsm->state = GSM_SEARCH;
|
|
|
|
|
gsm->io_error++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_cleanup_mux - generic GSM protocol cleanup
|
|
|
|
|
* @gsm: our mux
|
2022-04-14 09:42:07 +00:00
|
|
|
|
* @disc: disconnect link?
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*
|
|
|
|
|
* Clean up the bits of the mux which are the same for all framing
|
|
|
|
|
* protocols. Remove the mux from the mux table, stop all the timers
|
|
|
|
|
* and then shut down each device hanging up the channels as we go.
|
|
|
|
|
*/
|
|
|
|
|
|
2022-04-14 09:42:07 +00:00
|
|
|
|
static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
int i;
|
2023-08-11 03:11:21 +00:00
|
|
|
|
struct gsm_dlci *dlci;
|
2012-08-13 12:45:15 +00:00
|
|
|
|
struct gsm_msg *txq, *ntxq;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2020-02-19 08:49:46 +00:00
|
|
|
|
gsm->dead = true;
|
2022-04-14 09:42:07 +00:00
|
|
|
|
mutex_lock(&gsm->mutex);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2023-08-11 03:11:21 +00:00
|
|
|
|
dlci = gsm->dlci[0];
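	/* DLCI 0 is the control channel; if requested, close it down first and
	 * wait for the disconnect to complete before the other channels are
	 * released below.
	 */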
|
2022-04-14 09:42:07 +00:00
|
|
|
|
if (dlci) {
|
|
|
|
|
if (disc && dlci->state != DLCI_CLOSED) {
|
|
|
|
|
gsm_dlci_begin_close(dlci);
|
|
|
|
|
wait_event(gsm->event, dlci->state == DLCI_CLOSED);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
2022-04-14 09:42:07 +00:00
|
|
|
|
dlci->dead = true;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
2022-04-14 09:42:07 +00:00
|
|
|
|
/* Finish outstanding timers, making sure they are done */
|
2022-10-08 11:02:21 +00:00
|
|
|
|
del_timer_sync(&gsm->kick_timer);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
del_timer_sync(&gsm->t2_timer);
|
2023-02-14 12:27:37 +00:00
|
|
|
|
del_timer_sync(&gsm->ka_timer);
|
2017-05-31 06:19:05 +00:00
|
|
|
|
|
2022-07-01 12:23:31 +00:00
|
|
|
|
/* Finish writing to ldisc */
|
|
|
|
|
flush_work(&gsm->tx_work);
|
|
|
|
|
|
2022-04-14 09:42:14 +00:00
|
|
|
|
/* Free up any link layer users and finally the control channel */
|
2022-07-01 06:16:45 +00:00
|
|
|
|
if (gsm->has_devices) {
|
|
|
|
|
gsm_unregister_devices(gsm_tty_driver, gsm->num);
|
|
|
|
|
gsm->has_devices = false;
|
|
|
|
|
}
|
2022-04-14 09:42:14 +00:00
|
|
|
|
for (i = NUM_DLCI - 1; i >= 0; i--)
|
2023-09-14 05:15:07 +00:00
|
|
|
|
if (gsm->dlci[i])
|
2011-06-16 21:20:13 +00:00
|
|
|
|
gsm_dlci_release(gsm->dlci[i]);
|
n_gsm: race between ld close and gsmtty open
ttyA has ld associated to n_gsm, when ttyA is closing, it triggers
to release gsmttyB's ld data dlci[B], then race would happen if gsmttyB
is opening in parallel.
(Note: This patch set differs from previous set in that it uses mutex
instead of spin lock to avoid race, so that it avoids sleeping in automic
context)
Here are race cases we found recently in test:
CASE #1
====================================================================
releasing dlci[B] race with gsmtty_install(gsmttyB), then panic
in gsmtty_open(gsmttyB), as below:
tty_release(ttyA) tty_open(gsmttyB)
| |
----- gsmtty_install(gsmttyB)
| |
----- gsm_dlci_alloc(gsmttyB) => alloc dlci[B]
tty_ldisc_release(ttyA) -----
| |
gsm_dlci_release(dlci[B]) -----
| |
gsm_dlci_free(dlci[B]) -----
| |
----- gsmtty_open(gsmttyB)
gsmtty_open()
{
struct gsm_dlci *dlci = tty->driver_data; => here it uses dlci[B]
...
}
In gsmtty_open(gsmttyA), it uses dlci[B] which was release, so hit a panic.
=====================================================================
CASE #2
=====================================================================
releasing dlci[0] race with gsmtty_install(gsmttyB), then panic
in gsmtty_open(), as below:
tty_release(ttyA) tty_open(gsmttyB)
| |
----- gsmtty_install(gsmttyB)
| |
----- gsm_dlci_alloc(gsmttyB) => alloc dlci[B]
| |
----- gsmtty_open(gsmttyB) fail
| |
----- tty_release(gsmttyB)
| |
----- gsmtty_close(gsmttyB)
| |
----- gsmtty_detach_dlci(dlci[B])
| |
----- dlci_put(dlci[B])
| |
tty_ldisc_release(ttyA) -----
| |
gsm_dlci_release(dlci[0]) -----
| |
gsm_dlci_free(dlci[0]) -----
| |
----- dlci_put(dlci[0])
In gsmtty_detach_dlci(dlci[B]), it tries to use dlci[0] which was released,
then hit panic.
=====================================================================
IMHO, n_gsm tty operations would refer released ldisc, as long as
gsm_dlci_release() has chance to release ldisc data when some gsmtty operations
are ongoing..
This patch is try to avoid it by:
1) in n_gsm driver, use a global gsm mutex lock to avoid gsm_dlci_release() run in
parallel with gsmtty_install();
2) Increase dlci's ref count in gsmtty_install() instead of in gsmtty_open(), the
purpose is to prevent gsm_dlci_release() releasing dlci after gsmtty_install()
allocats dlci but before gsmtty_open increases dlci's ref count;
3) Decrease dlci's ref count in gsmtty_remove(), a tty framework API, this is the
opposite process of step 2).
Signed-off-by: Chao Bi <chao.bi@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-11-26 04:09:39 +00:00
|
|
|
|
mutex_unlock(&gsm->mutex);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/* Now wipe the queues */
|
2022-04-14 09:42:15 +00:00
|
|
|
|
tty_ldisc_flush(gsm->tty);
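	/* guard() from <linux/cleanup.h> holds tx_lock until the end of this
	 * function, covering the purge of both transmit queues below.
	 */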
|
2024-09-26 13:02:13 +00:00
|
|
|
|
|
|
|
|
|
guard(spinlock_irqsave)(&gsm->tx_lock);
|
2022-07-01 12:23:31 +00:00
|
|
|
|
list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list)
|
|
|
|
|
kfree(txq);
|
|
|
|
|
INIT_LIST_HEAD(&gsm->tx_ctrl_list);
|
|
|
|
|
list_for_each_entry_safe(txq, ntxq, &gsm->tx_data_list, list)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
kfree(txq);
|
2022-07-01 12:23:31 +00:00
|
|
|
|
INIT_LIST_HEAD(&gsm->tx_data_list);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_activate_mux - generic GSM setup
|
|
|
|
|
* @gsm: our mux
|
|
|
|
|
*
|
|
|
|
|
* Set up the bits of the mux which are the same for all framing
|
|
|
|
|
* protocols. Add the mux to the mux table so it can be opened and
|
|
|
|
|
* finally kick off connecting to DLCI 0 on the modem.
|
|
|
|
|
*/
|
|
|
|
|
|
2013-12-16 10:58:24 +00:00
|
|
|
|
static int gsm_activate_mux(struct gsm_mux *gsm)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
struct gsm_dlci *dlci;
|
2022-07-01 06:16:45 +00:00
|
|
|
|
int ret;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2022-07-01 12:23:32 +00:00
|
|
|
|
dlci = gsm_dlci_alloc(gsm, 0);
|
|
|
|
|
if (dlci == NULL)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
2022-08-31 07:37:55 +00:00
|
|
|
|
if (gsm->encoding == GSM_BASIC_OPT)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->receive = gsm0_receive;
|
|
|
|
|
else
|
|
|
|
|
gsm->receive = gsm1_receive;
|
|
|
|
|
|
2022-07-01 06:16:45 +00:00
|
|
|
|
ret = gsm_register_devices(gsm_tty_driver, gsm->num);
|
|
|
|
|
if (ret)
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
|
|
gsm->has_devices = true;
|
2020-02-19 08:49:46 +00:00
|
|
|
|
gsm->dead = false; /* Tty opens are now permissible */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsm_free_mux - free up a mux
|
2020-08-18 08:56:52 +00:00
|
|
|
|
* @gsm: mux to free
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*
|
2011-06-16 21:20:13 +00:00
|
|
|
|
* Dispose of allocated resources for a dead mux
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*/
|
2013-12-16 10:58:24 +00:00
|
|
|
|
static void gsm_free_mux(struct gsm_mux *gsm)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
2022-04-14 09:42:08 +00:00
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_MUX; i++) {
|
|
|
|
|
if (gsm == gsm_mux[i]) {
|
|
|
|
|
gsm_mux[i] = NULL;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
mutex_destroy(&gsm->mutex);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
kfree(gsm->txframe);
|
|
|
|
|
kfree(gsm->buf);
|
|
|
|
|
kfree(gsm);
|
|
|
|
|
}
|
|
|
|
|
|
2011-06-16 21:20:13 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_free_muxr - free up a mux
|
2020-08-18 08:56:52 +00:00
|
|
|
|
* @ref: kreference to the mux to free
|
2011-06-16 21:20:13 +00:00
|
|
|
|
*
|
|
|
|
|
* Dispose of allocated resources for a dead mux
|
|
|
|
|
*/
|
|
|
|
|
static void gsm_free_muxr(struct kref *ref)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm = container_of(ref, struct gsm_mux, ref);
|
|
|
|
|
gsm_free_mux(gsm);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void mux_get(struct gsm_mux *gsm)
|
|
|
|
|
{
|
2022-04-14 09:42:08 +00:00
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(&gsm_mux_lock, flags);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
kref_get(&gsm->ref);
|
2022-04-14 09:42:08 +00:00
|
|
|
|
spin_unlock_irqrestore(&gsm_mux_lock, flags);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void mux_put(struct gsm_mux *gsm)
|
|
|
|
|
{
|
2022-04-14 09:42:08 +00:00
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
|
|
spin_lock_irqsave(&gsm_mux_lock, flags);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
kref_put(&gsm->ref, gsm_free_muxr);
|
2022-04-14 09:42:08 +00:00
|
|
|
|
spin_unlock_irqrestore(&gsm_mux_lock, flags);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
}
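/* Each mux instance owns a contiguous block of NUM_DLCI tty lines; the two
 * helpers below map between a mux number and its base tty line.
 */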
|
|
|
|
|
|
2019-07-10 19:26:55 +00:00
|
|
|
|
static inline unsigned int mux_num_to_base(struct gsm_mux *gsm)
|
|
|
|
|
{
|
|
|
|
|
return gsm->num * NUM_DLCI;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline unsigned int mux_line_to_num(unsigned int line)
|
|
|
|
|
{
|
|
|
|
|
return line / NUM_DLCI;
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsm_alloc_mux - allocate a mux
|
|
|
|
|
*
|
|
|
|
|
* Creates a new mux ready for activation.
|
|
|
|
|
*/
|
|
|
|
|
|
2013-12-16 10:58:24 +00:00
|
|
|
|
static struct gsm_mux *gsm_alloc_mux(void)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
2022-04-14 09:42:08 +00:00
|
|
|
|
int i;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL);
|
|
|
|
|
if (gsm == NULL)
|
|
|
|
|
return NULL;
|
|
|
|
|
gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL);
|
|
|
|
|
if (gsm->buf == NULL) {
|
|
|
|
|
kfree(gsm);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
2022-04-14 09:42:13 +00:00
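	/* Transmit frame buffer, sized generously (roughly twice the maximum
	 * frame) so that advanced-option byte stuffing cannot overrun it.
	 */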
|
|
|
|
gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (gsm->txframe == NULL) {
|
|
|
|
|
kfree(gsm->buf);
|
|
|
|
|
kfree(gsm);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
spin_lock_init(&gsm->lock);
|
2013-11-26 04:09:39 +00:00
|
|
|
|
mutex_init(&gsm->mutex);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
kref_init(&gsm->ref);
|
2022-07-01 12:23:31 +00:00
|
|
|
|
INIT_LIST_HEAD(&gsm->tx_ctrl_list);
|
|
|
|
|
INIT_LIST_HEAD(&gsm->tx_data_list);
|
2022-10-08 11:02:21 +00:00
|
|
|
|
timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
|
2022-08-27 13:47:19 +00:00
|
|
|
|
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
|
2023-02-14 12:27:37 +00:00
|
|
|
|
timer_setup(&gsm->ka_timer, gsm_control_keep_alive, 0);
|
2022-08-27 13:47:19 +00:00
|
|
|
|
INIT_WORK(&gsm->tx_work, gsmld_write_task);
|
|
|
|
|
init_waitqueue_head(&gsm->event);
|
|
|
|
|
spin_lock_init(&gsm->control_lock);
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_lock_init(&gsm->tx_lock);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
|
|
|
|
gsm->t1 = T1;
|
|
|
|
|
gsm->t2 = T2;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
gsm->t3 = T3;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->n2 = N2;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
gsm->k = K;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->ftype = UIH;
|
|
|
|
|
gsm->adaption = 1;
|
2022-08-31 07:37:55 +00:00
|
|
|
|
gsm->encoding = GSM_ADV_OPT;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
|
|
|
|
|
gsm->mtu = 64;
|
2020-02-19 08:49:46 +00:00
|
|
|
|
gsm->dead = true; /* Avoid early tty opens */
|
2023-03-15 10:53:52 +00:00
|
|
|
|
gsm->wait_config = false; /* Disabled */
|
2023-02-14 12:27:37 +00:00
|
|
|
|
gsm->keep_alive = 0; /* Disabled */
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2022-04-14 09:42:08 +00:00
|
|
|
|
/* Store the instance to the mux array or abort if no space is
|
|
|
|
|
* available.
|
|
|
|
|
*/
|
|
|
|
|
spin_lock(&gsm_mux_lock);
|
|
|
|
|
for (i = 0; i < MAX_MUX; i++) {
|
|
|
|
|
if (!gsm_mux[i]) {
|
|
|
|
|
gsm_mux[i] = gsm;
|
|
|
|
|
gsm->num = i;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
spin_unlock(&gsm_mux_lock);
|
|
|
|
|
if (i == MAX_MUX) {
|
|
|
|
|
mutex_destroy(&gsm->mutex);
|
|
|
|
|
kfree(gsm->txframe);
|
|
|
|
|
kfree(gsm->buf);
|
|
|
|
|
kfree(gsm);
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return gsm;
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-14 01:25:26 +00:00
|
|
|
|
static void gsm_copy_config_values(struct gsm_mux *gsm,
|
|
|
|
|
struct gsm_config *c)
|
|
|
|
|
{
|
|
|
|
|
memset(c, 0, sizeof(*c));
|
|
|
|
|
c->adaption = gsm->adaption;
|
|
|
|
|
c->encapsulation = gsm->encoding;
|
|
|
|
|
c->initiator = gsm->initiator;
|
|
|
|
|
c->t1 = gsm->t1;
|
|
|
|
|
c->t2 = gsm->t2;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
c->t3 = gsm->t3;
|
2019-01-14 01:25:26 +00:00
|
|
|
|
c->n2 = gsm->n2;
|
|
|
|
|
if (gsm->ftype == UIH)
|
|
|
|
|
c->i = 1;
|
|
|
|
|
else
|
|
|
|
|
c->i = 2;
|
|
|
|
|
pr_debug("Ftype %d i %d\n", gsm->ftype, c->i);
|
|
|
|
|
c->mru = gsm->mru;
|
|
|
|
|
c->mtu = gsm->mtu;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
c->k = gsm->k;
|
2019-01-14 01:25:26 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
|
|
|
|
|
{
|
|
|
|
|
int need_close = 0;
|
|
|
|
|
int need_restart = 0;
|
|
|
|
|
|
2023-03-15 10:53:53 +00:00
|
|
|
|
/* Stuff we don't support yet - UI or I frame transport */
|
|
|
|
|
if (c->adaption != 1 && c->adaption != 2)
|
2019-01-14 01:25:26 +00:00
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
/* Check the MRU/MTU range looks sane */
|
2022-11-03 09:17:41 +00:00
|
|
|
|
if (c->mru < MIN_MTU || c->mtu < MIN_MTU)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
if (c->mru > MAX_MRU || c->mtu > MAX_MTU)
|
2019-01-14 01:25:26 +00:00
|
|
|
|
return -EINVAL;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
if (c->t3 > MAX_T3)
|
|
|
|
|
return -EINVAL;
|
2022-04-14 09:42:16 +00:00
|
|
|
|
if (c->n2 > 255)
|
2019-01-14 01:25:26 +00:00
|
|
|
|
return -EINVAL;
|
|
|
|
|
if (c->encapsulation > 1) /* Basic, advanced, no I */
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
if (c->initiator > 1)
|
|
|
|
|
return -EINVAL;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
if (c->k > MAX_WINDOW_SIZE)
|
|
|
|
|
return -EINVAL;
|
2019-01-14 01:25:26 +00:00
|
|
|
|
if (c->i == 0 || c->i > 2) /* UIH and UI only */
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
/*
|
|
|
|
|
* See what is needed for reconfiguration
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
/* Timing fields */
|
|
|
|
|
if (c->t1 != 0 && c->t1 != gsm->t1)
|
|
|
|
|
need_restart = 1;
|
|
|
|
|
if (c->t2 != 0 && c->t2 != gsm->t2)
|
|
|
|
|
need_restart = 1;
|
|
|
|
|
if (c->encapsulation != gsm->encoding)
|
|
|
|
|
need_restart = 1;
|
|
|
|
|
if (c->adaption != gsm->adaption)
|
|
|
|
|
need_restart = 1;
|
|
|
|
|
/* Requires care */
|
|
|
|
|
if (c->initiator != gsm->initiator)
|
|
|
|
|
need_close = 1;
|
|
|
|
|
if (c->mru != gsm->mru)
|
|
|
|
|
need_restart = 1;
|
|
|
|
|
if (c->mtu != gsm->mtu)
|
|
|
|
|
need_restart = 1;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Close down what is needed, restart and initiate the new
|
2022-04-14 09:42:07 +00:00
|
|
|
|
* configuration. On the first time there is no DLCI[0]
|
|
|
|
|
* and closing or cleaning up is not necessary.
|
2019-01-14 01:25:26 +00:00
|
|
|
|
*/
|
2022-04-14 09:42:07 +00:00
|
|
|
|
if (need_close || need_restart)
|
|
|
|
|
gsm_cleanup_mux(gsm, true);
|
2019-01-14 01:25:26 +00:00
|
|
|
|
|
|
|
|
|
gsm->initiator = c->initiator;
|
|
|
|
|
gsm->mru = c->mru;
|
|
|
|
|
gsm->mtu = c->mtu;
|
2022-08-31 07:37:55 +00:00
|
|
|
|
gsm->encoding = c->encapsulation ? GSM_ADV_OPT : GSM_BASIC_OPT;
|
2019-01-14 01:25:26 +00:00
|
|
|
|
gsm->adaption = c->adaption;
|
|
|
|
|
gsm->n2 = c->n2;
|
|
|
|
|
|
|
|
|
|
if (c->i == 1)
|
|
|
|
|
gsm->ftype = UIH;
|
|
|
|
|
else if (c->i == 2)
|
|
|
|
|
gsm->ftype = UI;
|
|
|
|
|
|
|
|
|
|
if (c->t1)
|
|
|
|
|
gsm->t1 = c->t1;
|
|
|
|
|
if (c->t2)
|
|
|
|
|
gsm->t2 = c->t2;
|
2022-11-03 09:17:42 +00:00
|
|
|
|
if (c->t3)
|
|
|
|
|
gsm->t3 = c->t3;
|
|
|
|
|
if (c->k)
|
|
|
|
|
gsm->k = c->k;
|
2019-01-14 01:25:26 +00:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* FIXME: We need to separate activation/deactivation from adding
|
|
|
|
|
* and removing from the mux array
|
|
|
|
|
*/
|
2022-05-04 08:17:32 +00:00
|
|
|
|
if (gsm->dead) {
|
2023-08-17 09:32:25 +00:00
|
|
|
|
int ret = gsm_activate_mux(gsm);
|
2022-05-04 08:17:32 +00:00
|
|
|
|
if (ret)
|
|
|
|
|
return ret;
|
|
|
|
|
if (gsm->initiator)
|
|
|
|
|
gsm_dlci_begin_open(gsm->dlci[0]);
|
|
|
|
|
}
|
2019-01-14 01:25:26 +00:00
|
|
|
|
return 0;
|
|
|
|
|
}
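/*
 * Illustrative only, not part of the driver: a minimal userspace sketch of
 * the read-modify-write pattern that lands in gsm_config() above. It assumes
 * "fd" is a tty that already carries the N_GSM0710 line discipline; the
 * field values below are examples, not recommendations.
 */
#include <linux/gsmmux.h>
#include <sys/ioctl.h>

static int example_configure_mux(int fd)
{
	struct gsm_config c;

	if (ioctl(fd, GSMIOC_GETCONF, &c) < 0)	/* read the current defaults */
		return -1;
	c.initiator = 1;			/* this side opens DLCI 0 */
	c.encapsulation = 0;			/* basic option framing */
	c.mru = 127;				/* stay within the MRU/MTU checks above */
	c.mtu = 127;
	return ioctl(fd, GSMIOC_SETCONF, &c);	/* handled by gsm_config() */
}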
|
|
|
|
|
|
2023-02-14 12:27:37 +00:00
|
|
|
|
static void gsm_copy_config_ext_values(struct gsm_mux *gsm,
|
|
|
|
|
struct gsm_config_ext *ce)
|
|
|
|
|
{
|
|
|
|
|
memset(ce, 0, sizeof(*ce));
|
2023-03-15 10:53:52 +00:00
|
|
|
|
ce->wait_config = gsm->wait_config ? 1 : 0;
|
2023-02-14 12:27:37 +00:00
|
|
|
|
ce->keep_alive = gsm->keep_alive;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int gsm_config_ext(struct gsm_mux *gsm, struct gsm_config_ext *ce)
|
|
|
|
|
{
|
2023-08-17 09:32:31 +00:00
|
|
|
|
bool need_restart = false;
|
2023-02-14 12:27:37 +00:00
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Check that userspace doesn't put stuff in here to prevent breakages
|
|
|
|
|
* in the future.
|
|
|
|
|
*/
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(ce->reserved); i++)
|
|
|
|
|
if (ce->reserved[i])
|
|
|
|
|
return -EINVAL;
|
2023-08-17 09:32:31 +00:00
|
|
|
|
if (ce->flags & ~GSM_FL_RESTART)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
/* Requires care */
|
|
|
|
|
if (ce->flags & GSM_FL_RESTART)
|
|
|
|
|
need_restart = true;
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
* Close down what is needed, restart and initiate the new
|
|
|
|
|
* configuration. On the first time there is no DLCI[0]
|
|
|
|
|
* and closing or cleaning up is not necessary.
|
|
|
|
|
*/
|
|
|
|
|
if (need_restart)
|
|
|
|
|
gsm_cleanup_mux(gsm, true);
|
2023-02-14 12:27:37 +00:00
|
|
|
|
|
2023-03-15 10:53:52 +00:00
|
|
|
|
/*
|
|
|
|
|
* Setup the new configuration values
|
|
|
|
|
*/
|
|
|
|
|
gsm->wait_config = ce->wait_config ? true : false;
|
2023-02-14 12:27:37 +00:00
|
|
|
|
gsm->keep_alive = ce->keep_alive;
|
2023-03-15 10:53:52 +00:00
|
|
|
|
|
2023-08-17 09:32:31 +00:00
|
|
|
|
if (gsm->dead) {
|
|
|
|
|
int ret = gsm_activate_mux(gsm);
|
|
|
|
|
if (ret)
|
|
|
|
|
return ret;
|
|
|
|
|
if (gsm->initiator)
|
|
|
|
|
gsm_dlci_begin_open(gsm->dlci[0]);
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-14 12:27:37 +00:00
|
|
|
|
return 0;
|
|
|
|
|
}
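/*
 * Illustrative only, not part of the driver: a userspace sketch of the
 * extended configuration path handled by gsm_config_ext() above, assuming
 * "fd" is a tty in N_GSM0710 mode. The keep-alive value is an example;
 * setting ce.flags = GSM_FL_RESTART instead would force the restart branch
 * seen above.
 */
#include <linux/gsmmux.h>
#include <sys/ioctl.h>

static int example_configure_mux_ext(int fd)
{
	struct gsm_config_ext ce;

	if (ioctl(fd, GSMIOC_GETCONF_EXT, &ce) < 0)	/* reserved[] comes back zeroed */
		return -1;
	ce.keep_alive = 100;		/* enable control channel keep-alive; 0 disables it */
	ce.wait_config = 1;		/* hold DLCI opens until a DLCI config is set */
	return ioctl(fd, GSMIOC_SETCONF_EXT, &ce);
}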
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsmld_output - write to link
|
|
|
|
|
* @gsm: our mux
|
|
|
|
|
* @data: bytes to output
|
|
|
|
|
* @len: size
|
|
|
|
|
*
|
|
|
|
|
* Write a block of data from the GSM mux to the data channel. This
|
|
|
|
|
* will eventually be serialized from above but at the moment isn't.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len)
|
|
|
|
|
{
|
|
|
|
|
if (tty_write_room(gsm->tty) < len) {
|
|
|
|
|
set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags);
|
|
|
|
|
return -ENOSPC;
|
|
|
|
|
}
|
2022-08-31 07:37:59 +00:00
|
|
|
|
if (debug & DBG_DATA)
|
2022-05-12 13:15:06 +00:00
|
|
|
|
gsm_hex_dump_bytes(__func__, data, len);
|
2021-09-30 06:06:24 +00:00
|
|
|
|
return gsm->tty->ops->write(gsm->tty, data, len);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
2022-07-01 12:23:31 +00:00
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_write_trigger - schedule ldisc write task
|
|
|
|
|
* @gsm: our mux
|
|
|
|
|
*/
|
|
|
|
|
static void gsmld_write_trigger(struct gsm_mux *gsm)
|
|
|
|
|
{
|
|
|
|
|
if (!gsm || !gsm->dlci[0] || gsm->dlci[0]->dead)
|
|
|
|
|
return;
|
|
|
|
|
schedule_work(&gsm->tx_work);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_write_task - ldisc write task
|
|
|
|
|
* @work: our tx write work
|
|
|
|
|
*
|
|
|
|
|
* Writes out data to the ldisc if possible. We are doing this here to
|
|
|
|
|
* avoid dead-locking. This returns if no space or data is left for output.
|
|
|
|
|
*/
|
|
|
|
|
static void gsmld_write_task(struct work_struct *work)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
|
2022-10-08 11:02:20 +00:00
|
|
|
|
unsigned long flags;
|
2022-07-01 12:23:31 +00:00
|
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
|
|
/* All outstanding control channel messages and one data
|
|
|
|
|
* frame are sent.
|
|
|
|
|
*/
|
|
|
|
|
ret = -ENODEV;
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_lock_irqsave(&gsm->tx_lock, flags);
|
2022-07-01 12:23:31 +00:00
|
|
|
|
if (gsm->tty)
|
|
|
|
|
ret = gsm_data_kick(gsm);
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_unlock_irqrestore(&gsm->tx_lock, flags);
|
2022-07-01 12:23:31 +00:00
|
|
|
|
|
|
|
|
|
if (ret >= 0)
|
|
|
|
|
for (i = 0; i < NUM_DLCI; i++)
|
|
|
|
|
if (gsm->dlci[i])
|
|
|
|
|
tty_port_tty_wakeup(&gsm->dlci[i]->port);
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
/**
|
|
|
|
|
* gsmld_attach_gsm - mode set up
|
|
|
|
|
* @tty: our tty structure
|
|
|
|
|
* @gsm: our mux
|
|
|
|
|
*
|
|
|
|
|
* Set up the MUX for basic mode and commence connecting to the
|
|
|
|
|
* modem. Currently called from the line discipline set up but
|
|
|
|
|
* will need moving to an ioctl path.
|
|
|
|
|
*/
|
|
|
|
|
|
2022-07-01 06:16:45 +00:00
|
|
|
|
static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
gsm->tty = tty_kref_get(tty);
|
2022-04-22 07:10:25 +00:00
|
|
|
|
/* Turn off tty XON/XOFF handling to handle it explicitly. */
|
|
|
|
|
gsm->old_c_iflag = tty->termios.c_iflag;
|
|
|
|
|
tty->termios.c_iflag &= (IXON | IXOFF);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_detach_gsm - stop doing 0710 mux
|
2011-05-10 08:16:21 +00:00
|
|
|
|
* @tty: tty attached to the mux
|
2010-03-26 11:32:54 +00:00
|
|
|
|
* @gsm: mux
|
|
|
|
|
*
|
|
|
|
|
* Shutdown and then clean up the resources used by the line discipline
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
|
|
|
|
|
{
|
|
|
|
|
WARN_ON(tty != gsm->tty);
|
2022-04-22 07:10:25 +00:00
|
|
|
|
/* Restore tty XON/XOFF handling. */
|
|
|
|
|
gsm->tty->termios.c_iflag = gsm->old_c_iflag;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
tty_kref_put(gsm->tty);
|
|
|
|
|
gsm->tty = NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2023-08-10 09:14:50 +00:00
|
|
|
|
static void gsmld_receive_buf(struct tty_struct *tty, const u8 *cp,
|
2023-08-10 09:14:51 +00:00
|
|
|
|
const u8 *fp, size_t count)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm = tty->disc_data;
|
2023-12-06 07:37:04 +00:00
|
|
|
|
u8 flags = TTY_NORMAL;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2022-08-31 07:37:59 +00:00
|
|
|
|
if (debug & DBG_DATA)
|
2022-05-12 13:15:06 +00:00
|
|
|
|
gsm_hex_dump_bytes(__func__, cp, count);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
2021-03-02 06:22:11 +00:00
|
|
|
|
for (; count; count--, cp++) {
|
|
|
|
|
if (fp)
|
|
|
|
|
flags = *fp++;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
switch (flags) {
|
|
|
|
|
case TTY_NORMAL:
|
2022-08-14 01:52:12 +00:00
|
|
|
|
if (gsm->receive)
|
|
|
|
|
gsm->receive(gsm, *cp);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
|
|
|
|
case TTY_OVERRUN:
|
|
|
|
|
case TTY_BREAK:
|
|
|
|
|
case TTY_PARITY:
|
|
|
|
|
case TTY_FRAME:
|
2021-11-18 07:17:16 +00:00
|
|
|
|
gsm_error(gsm);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
|
|
|
|
default:
|
2012-08-13 12:43:58 +00:00
|
|
|
|
WARN_ONCE(1, "%s: unknown flag %d\n",
|
2015-03-31 13:55:59 +00:00
|
|
|
|
tty_name(tty), flags);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
/* FASYNC if needed ? */
|
|
|
|
|
/* If clogged call tty_throttle(tty); */
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_flush_buffer - clean input queue
|
|
|
|
|
* @tty: terminal device
|
|
|
|
|
*
|
|
|
|
|
* Flush the input buffer. Called when the line discipline is
|
|
|
|
|
* being closed, when the tty layer wants the buffer flushed (eg
|
|
|
|
|
* at hangup).
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsmld_flush_buffer(struct tty_struct *tty)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_close - close the ldisc for this tty
|
|
|
|
|
* @tty: device
|
|
|
|
|
*
|
|
|
|
|
* Called from the terminal layer when this line discipline is
|
|
|
|
|
* being shut down, either because of a close or because of a
|
|
|
|
|
* discipline change. The function will not be called while other
|
|
|
|
|
* ldisc methods are in progress.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsmld_close(struct tty_struct *tty)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm = tty->disc_data;
|
|
|
|
|
|
2022-04-14 09:42:09 +00:00
|
|
|
|
/* The ldisc locks and closes the port before calling our close. This
|
|
|
|
|
* means we have no way to do a proper disconnect. We will not bother
|
|
|
|
|
* to do one.
|
|
|
|
|
*/
|
|
|
|
|
gsm_cleanup_mux(gsm, false);
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
gsmld_detach_gsm(tty, gsm);
|
|
|
|
|
|
|
|
|
|
gsmld_flush_buffer(tty);
|
|
|
|
|
/* Do other clean up here */
|
2011-06-16 21:20:13 +00:00
|
|
|
|
mux_put(gsm);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_open - open an ldisc
|
|
|
|
|
* @tty: terminal to open
|
|
|
|
|
*
|
|
|
|
|
* Called when this line discipline is being attached to the
|
|
|
|
|
* terminal device. Can sleep. Called serialized so that no
|
|
|
|
|
* other events will occur in parallel. No further open will occur
|
|
|
|
|
* until a close.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static int gsmld_open(struct tty_struct *tty)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm;
|
|
|
|
|
|
2023-07-31 18:59:42 +00:00
|
|
|
|
if (!capable(CAP_NET_ADMIN))
|
|
|
|
|
return -EPERM;
|
|
|
|
|
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (tty->ops->write == NULL)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
|
|
/* Attach our ldisc data */
|
|
|
|
|
gsm = gsm_alloc_mux();
|
|
|
|
|
if (gsm == NULL)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
|
|
tty->disc_data = gsm;
|
|
|
|
|
tty->receive_room = 65536;
|
|
|
|
|
|
|
|
|
|
/* Attach the initial passive connection */
|
2022-07-01 06:16:45 +00:00
|
|
|
|
gsmld_attach_gsm(tty, gsm);
|
|
|
|
|
|
2023-03-15 10:53:52 +00:00
|
|
|
|
/* The mux will not be activated yet; we wait for correct
|
|
|
|
|
* configuration first.
|
|
|
|
|
*/
|
|
|
|
|
if (gsm->encoding == GSM_BASIC_OPT)
|
|
|
|
|
gsm->receive = gsm0_receive;
|
|
|
|
|
else
|
|
|
|
|
gsm->receive = gsm1_receive;
|
|
|
|
|
|
2022-07-01 06:16:45 +00:00
|
|
|
|
return 0;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
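/*
 * Illustrative only, not part of the driver: how userspace reaches
 * gsmld_open() above. "fd" is assumed to be an already opened and configured
 * serial port connected to the modem; attaching the line discipline needs
 * CAP_NET_ADMIN, as checked at the top of gsmld_open().
 */
#include <linux/tty.h>
#include <sys/ioctl.h>

static int example_attach_n_gsm(int fd)
{
	int ldisc = N_GSM0710;

	/* Switch the tty to the GSM 07.10 multiplexer line discipline. */
	return ioctl(fd, TIOCSETD, &ldisc);
}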
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_write_wakeup - asynchronous I/O notifier
|
|
|
|
|
* @tty: tty device
|
|
|
|
|
*
|
|
|
|
|
* Required for the ptys, serial driver etc. since processes
|
|
|
|
|
* that attach themselves to the master and rely on ASYNC
|
|
|
|
|
* IO must be woken up
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static void gsmld_write_wakeup(struct tty_struct *tty)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux *gsm = tty->disc_data;
|
|
|
|
|
|
|
|
|
|
/* Queue poll */
|
2022-07-01 12:23:31 +00:00
|
|
|
|
gsmld_write_trigger(gsm);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_read - read function for tty
|
|
|
|
|
* @tty: tty device
|
|
|
|
|
* @file: file object
|
|
|
|
|
* @buf: userspace buffer pointer
|
|
|
|
|
* @nr: size of I/O
|
2021-05-20 12:19:05 +00:00
|
|
|
|
* @cookie: unused
|
|
|
|
|
* @offset: unused
|
2010-03-26 11:32:54 +00:00
|
|
|
|
*
|
|
|
|
|
* Perform reads for the line discipline. We are guaranteed that the
|
|
|
|
|
* line discipline will not be closed under us but we may get multiple
|
|
|
|
|
* parallel readers and must handle this ourselves. We may also get
|
|
|
|
|
* a hangup. Always called in user context, may sleep.
|
|
|
|
|
*
|
|
|
|
|
* This code must be sure never to sleep through a hangup.
|
|
|
|
|
*/
|
|
|
|
|
|
2023-08-10 09:15:05 +00:00
|
|
|
|
static ssize_t gsmld_read(struct tty_struct *tty, struct file *file, u8 *buf,
|
|
|
|
|
size_t nr, void **cookie, unsigned long offset)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_write - write function for tty
|
|
|
|
|
* @tty: tty device
|
|
|
|
|
* @file: file object
|
|
|
|
|
* @buf: userspace buffer pointer
|
|
|
|
|
* @nr: size of I/O
|
|
|
|
|
*
|
|
|
|
|
* Called when the owner of the device wants to send a frame
|
|
|
|
|
* itself (or some other control data). The data is transferred
|
|
|
|
|
* as-is and must be properly framed and checksummed as appropriate
|
|
|
|
|
* by userspace. Frames are either sent whole or not at all as this
|
|
|
|
|
* avoids pain on the user side.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
|
2023-08-10 09:15:05 +00:00
|
|
|
|
const u8 *buf, size_t nr)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
2022-07-01 06:16:52 +00:00
|
|
|
|
struct gsm_mux *gsm = tty->disc_data;
|
2022-10-08 11:02:20 +00:00
|
|
|
|
unsigned long flags;
|
2023-12-06 07:37:04 +00:00
|
|
|
|
size_t space;
|
2022-07-01 06:16:52 +00:00
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
|
|
if (!gsm)
|
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
|
|
ret = -ENOBUFS;
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_lock_irqsave(&gsm->tx_lock, flags);
|
2022-07-01 06:16:52 +00:00
|
|
|
|
space = tty_write_room(tty);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (space >= nr)
|
2022-07-01 06:16:52 +00:00
|
|
|
|
ret = tty->ops->write(tty, buf, nr);
|
|
|
|
|
else
|
|
|
|
|
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
|
2022-10-08 11:02:20 +00:00
|
|
|
|
spin_unlock_irqrestore(&gsm->tx_lock, flags);
|
2022-07-01 06:16:52 +00:00
|
|
|
|
|
|
|
|
|
return ret;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
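/*
 * Illustrative only, not part of the driver: pushing a raw frame through
 * gsmld_write() above. The buffer must already be a complete GSM 07.10
 * frame built and checksummed by userspace; this sketch only shows the
 * whole-frame-or-nothing retry loop for the -ENOBUFS case.
 */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static int example_send_raw_frame(int fd, const unsigned char *frame, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };

	for (;;) {
		ssize_t n = write(fd, frame, len);	/* dispatched to gsmld_write() */

		if (n >= 0)
			return 0;	/* the frame was accepted as a whole */
		if (errno != ENOBUFS)
			return -1;
		/* No room in the ldisc: wait until the tty reports writability. */
		if (poll(&pfd, 1, -1) < 0)
			return -1;
	}
}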
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
|
* gsmld_poll - poll method for N_GSM0710
|
|
|
|
|
* @tty: terminal device
|
|
|
|
|
* @file: file accessing it
|
|
|
|
|
* @wait: poll table
|
|
|
|
|
*
|
|
|
|
|
* Called when the line discipline is asked to poll() for data or
|
|
|
|
|
* for special events. This code is not serialized with respect to
|
|
|
|
|
* other events save open/close.
|
|
|
|
|
*
|
|
|
|
|
* This code must be sure never to sleep through a hangup.
|
|
|
|
|
* Called without the kernel lock held - fine
|
|
|
|
|
*/
|
|
|
|
|
|
2017-07-03 10:39:46 +00:00
|
|
|
|
static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
|
2010-03-26 11:32:54 +00:00
|
|
|
|
poll_table *wait)
|
|
|
|
|
{
|
2017-07-03 10:39:46 +00:00
|
|
|
|
__poll_t mask = 0;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
struct gsm_mux *gsm = tty->disc_data;
|
|
|
|
|
|
|
|
|
|
poll_wait(file, &tty->read_wait, wait);
|
|
|
|
|
poll_wait(file, &tty->write_wait, wait);
|
2022-07-07 11:32:23 +00:00
|
|
|
|
|
|
|
|
|
if (gsm->dead)
|
|
|
|
|
mask |= EPOLLHUP;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (tty_hung_up_p(file))
|
2018-02-11 22:34:03 +00:00
|
|
|
|
mask |= EPOLLHUP;
|
2022-07-07 11:32:23 +00:00
|
|
|
|
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
|
|
|
|
|
mask |= EPOLLHUP;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0)
|
2018-02-11 22:34:03 +00:00
|
|
|
|
mask |= EPOLLOUT | EPOLLWRNORM;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return mask;
|
|
|
|
|
}
|
|
|
|
|
|
2021-11-22 09:45:29 +00:00
|
|
|
|
static int gsmld_ioctl(struct tty_struct *tty, unsigned int cmd,
|
|
|
|
|
unsigned long arg)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
|
|
|
|
struct gsm_config c;
|
2023-02-14 12:27:37 +00:00
|
|
|
|
struct gsm_config_ext ce;
|
2023-03-15 10:53:54 +00:00
|
|
|
|
struct gsm_dlci_config dc;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
struct gsm_mux *gsm = tty->disc_data;
|
2023-04-11 16:45:32 +00:00
|
|
|
|
unsigned int base, addr;
|
2023-03-15 10:53:54 +00:00
|
|
|
|
struct gsm_dlci *dlci;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
|
case GSMIOC_GETCONF:
|
2019-01-14 01:25:26 +00:00
|
|
|
|
gsm_copy_config_values(gsm, &c);
|
2020-02-19 08:49:44 +00:00
|
|
|
|
if (copy_to_user((void __user *)arg, &c, sizeof(c)))
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return -EFAULT;
|
|
|
|
|
return 0;
|
|
|
|
|
case GSMIOC_SETCONF:
|
2020-02-19 08:49:44 +00:00
|
|
|
|
if (copy_from_user(&c, (void __user *)arg, sizeof(c)))
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return -EFAULT;
|
2019-01-14 01:25:26 +00:00
|
|
|
|
return gsm_config(gsm, &c);
|
2019-08-12 21:12:43 +00:00
|
|
|
|
case GSMIOC_GETFIRST:
|
|
|
|
|
base = mux_num_to_base(gsm);
|
|
|
|
|
return put_user(base + 1, (__u32 __user *)arg);
|
2023-02-14 12:27:37 +00:00
|
|
|
|
case GSMIOC_GETCONF_EXT:
|
|
|
|
|
gsm_copy_config_ext_values(gsm, &ce);
|
|
|
|
|
if (copy_to_user((void __user *)arg, &ce, sizeof(ce)))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
return 0;
|
|
|
|
|
case GSMIOC_SETCONF_EXT:
|
|
|
|
|
if (copy_from_user(&ce, (void __user *)arg, sizeof(ce)))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
return gsm_config_ext(gsm, &ce);
|
2023-03-15 10:53:54 +00:00
|
|
|
|
case GSMIOC_GETCONF_DLCI:
|
|
|
|
|
if (copy_from_user(&dc, (void __user *)arg, sizeof(dc)))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
if (dc.channel == 0 || dc.channel >= NUM_DLCI)
|
|
|
|
|
return -EINVAL;
|
2023-04-11 16:45:32 +00:00
|
|
|
|
addr = array_index_nospec(dc.channel, NUM_DLCI);
|
|
|
|
|
dlci = gsm->dlci[addr];
|
2023-03-15 10:53:54 +00:00
|
|
|
|
if (!dlci) {
|
2023-04-11 16:45:32 +00:00
|
|
|
|
dlci = gsm_dlci_alloc(gsm, addr);
|
2023-03-15 10:53:54 +00:00
|
|
|
|
if (!dlci)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
}
|
|
|
|
|
gsm_dlci_copy_config_values(dlci, &dc);
|
|
|
|
|
if (copy_to_user((void __user *)arg, &dc, sizeof(dc)))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
return 0;
|
|
|
|
|
case GSMIOC_SETCONF_DLCI:
|
|
|
|
|
if (copy_from_user(&dc, (void __user *)arg, sizeof(dc)))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
if (dc.channel == 0 || dc.channel >= NUM_DLCI)
|
|
|
|
|
return -EINVAL;
|
2023-04-11 16:45:32 +00:00
|
|
|
|
addr = array_index_nospec(dc.channel, NUM_DLCI);
|
|
|
|
|
dlci = gsm->dlci[addr];
|
2023-03-15 10:53:54 +00:00
|
|
|
|
if (!dlci) {
|
2023-04-11 16:45:32 +00:00
|
|
|
|
dlci = gsm_dlci_alloc(gsm, addr);
|
2023-03-15 10:53:54 +00:00
|
|
|
|
if (!dlci)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
}
|
|
|
|
|
return gsm_dlci_config(dlci, &dc, 0);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
default:
|
2021-09-14 09:11:24 +00:00
|
|
|
|
return n_tty_ioctl_helper(tty, cmd, arg);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
}
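/*
 * Illustrative only, not part of the driver: a userspace sketch of the
 * per-mux and per-DLCI ioctls dispatched above. GSMIOC_GETFIRST reports the
 * number of the first virtual gsmtty of this mux; the DLCI configuration
 * uses the same read-modify-write pattern as the mux configuration. "fd" is
 * assumed to be the tty carrying the N_GSM0710 line discipline.
 */
#include <linux/gsmmux.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int example_dlci_setup(int fd)
{
	struct gsm_dlci_config dc = { .channel = 1 };	/* configure DLCI 1 */
	unsigned int first;

	if (ioctl(fd, GSMIOC_GETFIRST, &first) < 0)
		return -1;
	printf("first channel is /dev/gsmtty%u\n", first);

	if (ioctl(fd, GSMIOC_GETCONF_DLCI, &dc) < 0)
		return -1;
	/* Adjust the returned per-DLCI values here as needed, then write back. */
	return ioctl(fd, GSMIOC_SETCONF_DLCI, &dc);
}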
|
|
|
|
|
|
2011-06-16 21:20:12 +00:00
|
|
|
|
/*
|
|
|
|
|
* Network interface
|
|
|
|
|
*
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
static int gsm_mux_net_open(struct net_device *net)
|
|
|
|
|
{
|
|
|
|
|
pr_debug("%s called\n", __func__);
|
|
|
|
|
netif_start_queue(net);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int gsm_mux_net_close(struct net_device *net)
|
|
|
|
|
{
|
|
|
|
|
netif_stop_queue(net);
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void dlci_net_free(struct gsm_dlci *dlci)
|
|
|
|
|
{
|
|
|
|
|
if (!dlci->net) {
|
|
|
|
|
WARN_ON(1);
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
dlci->adaption = dlci->prev_adaption;
|
|
|
|
|
dlci->data = dlci->prev_data;
|
|
|
|
|
free_netdev(dlci->net);
|
|
|
|
|
dlci->net = NULL;
|
|
|
|
|
}
|
|
|
|
|
static void net_free(struct kref *ref)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_mux_net *mux_net;
|
|
|
|
|
struct gsm_dlci *dlci;
|
|
|
|
|
|
|
|
|
|
mux_net = container_of(ref, struct gsm_mux_net, ref);
|
|
|
|
|
dlci = mux_net->dlci;
|
|
|
|
|
|
|
|
|
|
if (dlci->net) {
|
|
|
|
|
unregister_netdev(dlci->net);
|
|
|
|
|
dlci_net_free(dlci);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2011-06-16 21:20:13 +00:00
|
|
|
|
static inline void muxnet_get(struct gsm_mux_net *mux_net)
|
|
|
|
|
{
|
|
|
|
|
kref_get(&mux_net->ref);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void muxnet_put(struct gsm_mux_net *mux_net)
|
|
|
|
|
{
|
|
|
|
|
kref_put(&mux_net->ref, net_free);
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-24 13:18:39 +00:00
|
|
|
|
static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb,
|
2011-06-16 21:20:12 +00:00
|
|
|
|
struct net_device *net)
|
|
|
|
|
{
|
2015-03-29 12:54:13 +00:00
|
|
|
|
struct gsm_mux_net *mux_net = netdev_priv(net);
|
2011-06-16 21:20:12 +00:00
|
|
|
|
struct gsm_dlci *dlci = mux_net->dlci;
|
2011-06-16 21:20:13 +00:00
|
|
|
|
muxnet_get(mux_net);
|
2011-06-16 21:20:12 +00:00
|
|
|
|
|
|
|
|
|
skb_queue_head(&dlci->skb_list, skb);
|
2017-03-13 11:00:50 +00:00
|
|
|
|
net->stats.tx_packets++;
|
|
|
|
|
net->stats.tx_bytes += skb->len;
|
2011-06-16 21:20:12 +00:00
|
|
|
|
gsm_dlci_data_kick(dlci);
|
|
|
|
|
/* And tell the kernel when the last transmit started. */
|
2016-05-03 14:33:13 +00:00
|
|
|
|
netif_trans_update(net);
|
2011-06-16 21:20:13 +00:00
|
|
|
|
muxnet_put(mux_net);
|
2011-06-16 21:20:12 +00:00
|
|
|
|
return NETDEV_TX_OK;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* called when a packet did not ack after watchdog timeout */
|
netdev: pass the stuck queue to the timeout handler
This allows incrementing the correct timeout statistic without any mess.
Down the road, devices can learn to reset just the specific queue.
The patch was generated with the following script:
use strict;
use warnings;
our $^I = '.bak';
my @work = (
["arch/m68k/emu/nfeth.c", "nfeth_tx_timeout"],
["arch/um/drivers/net_kern.c", "uml_net_tx_timeout"],
["arch/um/drivers/vector_kern.c", "vector_net_tx_timeout"],
["arch/xtensa/platforms/iss/network.c", "iss_net_tx_timeout"],
["drivers/char/pcmcia/synclink_cs.c", "hdlcdev_tx_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/message/fusion/mptlan.c", "mpt_lan_tx_timeout"],
["drivers/misc/sgi-xp/xpnet.c", "xpnet_dev_tx_timeout"],
["drivers/net/appletalk/cops.c", "cops_timeout"],
["drivers/net/arcnet/arcdevice.h", "arcnet_timeout"],
["drivers/net/arcnet/arcnet.c", "arcnet_timeout"],
["drivers/net/arcnet/com20020.c", "arcnet_timeout"],
["drivers/net/ethernet/3com/3c509.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c515.c", "corkscrew_timeout"],
["drivers/net/ethernet/3com/3c574_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c589_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/typhoon.c", "typhoon_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "eip_tx_timeout"],
["drivers/net/ethernet/8390/8390.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390p.c", "eip_tx_timeout"],
["drivers/net/ethernet/8390/ax88796.c", "ax_ei_tx_timeout"],
["drivers/net/ethernet/8390/axnet_cs.c", "axnet_tx_timeout"],
["drivers/net/ethernet/8390/etherh.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/hydra.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mac8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mcf8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/lib8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/ne2k-pci.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/pcnet_cs.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/smc-ultra.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/wd.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/zorro8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/adaptec/starfire.c", "tx_timeout"],
["drivers/net/ethernet/agere/et131x.c", "et131x_tx_timeout"],
["drivers/net/ethernet/allwinner/sun4i-emac.c", "emac_timeout"],
["drivers/net/ethernet/alteon/acenic.c", "ace_watchdog"],
["drivers/net/ethernet/amazon/ena/ena_netdev.c", "ena_tx_timeout"],
["drivers/net/ethernet/amd/7990.h", "lance_tx_timeout"],
["drivers/net/ethernet/amd/7990.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/a2065.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/am79c961a.c", "am79c961_timeout"],
["drivers/net/ethernet/amd/amd8111e.c", "amd8111e_tx_timeout"],
["drivers/net/ethernet/amd/ariadne.c", "ariadne_tx_timeout"],
["drivers/net/ethernet/amd/atarilance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/au1000_eth.c", "au1000_tx_timeout"],
["drivers/net/ethernet/amd/declance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/lance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/mvme147.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/ni65.c", "ni65_timeout"],
["drivers/net/ethernet/amd/nmclan_cs.c", "mace_tx_timeout"],
["drivers/net/ethernet/amd/pcnet32.c", "pcnet32_tx_timeout"],
["drivers/net/ethernet/amd/sunlance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/xgbe/xgbe-drv.c", "xgbe_tx_timeout"],
["drivers/net/ethernet/apm/xgene-v2/main.c", "xge_timeout"],
["drivers/net/ethernet/apm/xgene/xgene_enet_main.c", "xgene_enet_timeout"],
["drivers/net/ethernet/apple/macmace.c", "mace_tx_timeout"],
["drivers/net/ethernet/atheros/ag71xx.c", "ag71xx_tx_timeout"],
["drivers/net/ethernet/atheros/alx/main.c", "alx_tx_timeout"],
["drivers/net/ethernet/atheros/atl1c/atl1c_main.c", "atl1c_tx_timeout"],
["drivers/net/ethernet/atheros/atl1e/atl1e_main.c", "atl1e_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl1.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl2.c", "atl2_tx_timeout"],
["drivers/net/ethernet/broadcom/b44.c", "b44_tx_timeout"],
["drivers/net/ethernet/broadcom/bcmsysport.c", "bcm_sysport_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2.c", "bnx2_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnxt/bnxt.c", "bnxt_tx_timeout"],
["drivers/net/ethernet/broadcom/genet/bcmgenet.c", "bcmgenet_timeout"],
["drivers/net/ethernet/broadcom/sb1250-mac.c", "sbmac_tx_timeout"],
["drivers/net/ethernet/broadcom/tg3.c", "tg3_tx_timeout"],
["drivers/net/ethernet/calxeda/xgmac.c", "xgmac_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c", "lio_vf_rep_tx_timeout"],
["drivers/net/ethernet/cavium/thunder/nicvf_main.c", "nicvf_tx_timeout"],
["drivers/net/ethernet/cirrus/cs89x0.c", "net_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cortina/gemini.c", "gmac_tx_timeout"],
["drivers/net/ethernet/davicom/dm9000.c", "dm9000_timeout"],
["drivers/net/ethernet/dec/tulip/de2104x.c", "de_tx_timeout"],
["drivers/net/ethernet/dec/tulip/tulip_core.c", "tulip_tx_timeout"],
["drivers/net/ethernet/dec/tulip/winbond-840.c", "tx_timeout"],
["drivers/net/ethernet/dlink/dl2k.c", "rio_tx_timeout"],
["drivers/net/ethernet/dlink/sundance.c", "tx_timeout"],
["drivers/net/ethernet/emulex/benet/be_main.c", "be_tx_timeout"],
["drivers/net/ethernet/ethoc.c", "ethoc_tx_timeout"],
["drivers/net/ethernet/faraday/ftgmac100.c", "ftgmac100_tx_timeout"],
["drivers/net/ethernet/fealnx.c", "fealnx_tx_timeout"],
["drivers/net/ethernet/freescale/dpaa/dpaa_eth.c", "dpaa_tx_timeout"],
["drivers/net/ethernet/freescale/fec_main.c", "fec_timeout"],
["drivers/net/ethernet/freescale/fec_mpc52xx.c", "mpc52xx_fec_tx_timeout"],
["drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c", "fs_timeout"],
["drivers/net/ethernet/freescale/gianfar.c", "gfar_timeout"],
["drivers/net/ethernet/freescale/ucc_geth.c", "ucc_geth_timeout"],
["drivers/net/ethernet/fujitsu/fmvj18x_cs.c", "fjn_tx_timeout"],
["drivers/net/ethernet/google/gve/gve_main.c", "gve_tx_timeout"],
["drivers/net/ethernet/hisilicon/hip04_eth.c", "hip04_timeout"],
["drivers/net/ethernet/hisilicon/hix5hd2_gmac.c", "hix5hd2_net_timeout"],
["drivers/net/ethernet/hisilicon/hns/hns_enet.c", "hns_nic_net_timeout"],
["drivers/net/ethernet/hisilicon/hns3/hns3_enet.c", "hns3_nic_net_timeout"],
["drivers/net/ethernet/huawei/hinic/hinic_main.c", "hinic_tx_timeout"],
["drivers/net/ethernet/i825xx/82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/ether1.c", "ether1_timeout"],
["drivers/net/ethernet/i825xx/lib82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/sun3_82586.c", "sun3_82586_timeout"],
["drivers/net/ethernet/ibm/ehea/ehea_main.c", "ehea_tx_watchdog"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/ibmvnic.c", "ibmvnic_tx_timeout"],
["drivers/net/ethernet/intel/e100.c", "e100_tx_timeout"],
["drivers/net/ethernet/intel/e1000/e1000_main.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/e1000e/netdev.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/fm10k/fm10k_netdev.c", "fm10k_tx_timeout"],
["drivers/net/ethernet/intel/i40e/i40e_main.c", "i40e_tx_timeout"],
["drivers/net/ethernet/intel/iavf/iavf_main.c", "iavf_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/igb/igb_main.c", "igb_tx_timeout"],
["drivers/net/ethernet/intel/igbvf/netdev.c", "igbvf_tx_timeout"],
["drivers/net/ethernet/intel/ixgb/ixgb_main.c", "ixgb_tx_timeout"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c", "adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", "ixgbe_tx_timeout"],
["drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c", "ixgbevf_tx_timeout"],
["drivers/net/ethernet/jme.c", "jme_tx_timeout"],
["drivers/net/ethernet/korina.c", "korina_tx_timeout"],
["drivers/net/ethernet/lantiq_etop.c", "ltq_etop_tx_timeout"],
["drivers/net/ethernet/marvell/mv643xx_eth.c", "mv643xx_eth_tx_timeout"],
["drivers/net/ethernet/marvell/pxa168_eth.c", "pxa168_eth_tx_timeout"],
["drivers/net/ethernet/marvell/skge.c", "skge_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/mediatek/mtk_eth_soc.c", "mtk_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx5/core/en_main.c", "mlx5e_tx_timeout"],
["drivers/net/ethernet/micrel/ks8842.c", "ks8842_tx_timeout"],
["drivers/net/ethernet/micrel/ksz884x.c", "netdev_tx_timeout"],
["drivers/net/ethernet/microchip/enc28j60.c", "enc28j60_tx_timeout"],
["drivers/net/ethernet/microchip/encx24j600.c", "encx24j600_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.h", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/jazzsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/macsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/natsemi.c", "ns_tx_timeout"],
["drivers/net/ethernet/natsemi/ns83820.c", "ns83820_tx_timeout"],
["drivers/net/ethernet/natsemi/xtsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/neterion/s2io.h", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/s2io.c", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/vxge/vxge-main.c", "vxge_tx_watchdog"],
["drivers/net/ethernet/netronome/nfp/nfp_net_common.c", "nfp_net_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c", "pch_gbe_tx_timeout"],
["drivers/net/ethernet/packetengines/hamachi.c", "hamachi_tx_timeout"],
["drivers/net/ethernet/packetengines/yellowfin.c", "yellowfin_tx_timeout"],
["drivers/net/ethernet/pensando/ionic/ionic_lif.c", "ionic_tx_timeout"],
["drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c", "netxen_tx_timeout"],
["drivers/net/ethernet/qlogic/qla3xxx.c", "ql3xxx_tx_timeout"],
["drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c", "qlcnic_tx_timeout"],
["drivers/net/ethernet/qualcomm/emac/emac.c", "emac_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_spi.c", "qcaspi_netdev_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_uart.c", "qcauart_netdev_tx_timeout"],
["drivers/net/ethernet/rdc/r6040.c", "r6040_tx_timeout"],
["drivers/net/ethernet/realtek/8139cp.c", "cp_tx_timeout"],
["drivers/net/ethernet/realtek/8139too.c", "rtl8139_tx_timeout"],
["drivers/net/ethernet/realtek/atp.c", "tx_timeout"],
["drivers/net/ethernet/realtek/r8169_main.c", "rtl8169_tx_timeout"],
["drivers/net/ethernet/renesas/ravb_main.c", "ravb_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c", "sxgbe_tx_timeout"],
["drivers/net/ethernet/seeq/ether3.c", "ether3_timeout"],
["drivers/net/ethernet/seeq/sgiseeq.c", "timeout"],
["drivers/net/ethernet/sfc/efx.c", "efx_watchdog"],
["drivers/net/ethernet/sfc/falcon/efx.c", "ef4_watchdog"],
["drivers/net/ethernet/sgi/ioc3-eth.c", "ioc3_timeout"],
["drivers/net/ethernet/sgi/meth.c", "meth_tx_timeout"],
["drivers/net/ethernet/silan/sc92031.c", "sc92031_tx_timeout"],
["drivers/net/ethernet/sis/sis190.c", "sis190_tx_timeout"],
["drivers/net/ethernet/sis/sis900.c", "sis900_tx_timeout"],
["drivers/net/ethernet/smsc/epic100.c", "epic_tx_timeout"],
["drivers/net/ethernet/smsc/smc911x.c", "smc911x_timeout"],
["drivers/net/ethernet/smsc/smc9194.c", "smc_timeout"],
["drivers/net/ethernet/smsc/smc91c92_cs.c", "smc_tx_timeout"],
["drivers/net/ethernet/smsc/smc91x.c", "smc_timeout"],
["drivers/net/ethernet/stmicro/stmmac/stmmac_main.c", "stmmac_tx_timeout"],
["drivers/net/ethernet/sun/cassini.c", "cas_tx_timeout"],
["drivers/net/ethernet/sun/ldmvsw.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/niu.c", "niu_tx_timeout"],
["drivers/net/ethernet/sun/sunbmac.c", "bigmac_tx_timeout"],
["drivers/net/ethernet/sun/sungem.c", "gem_tx_timeout"],
["drivers/net/ethernet/sun/sunhme.c", "happy_meal_tx_timeout"],
["drivers/net/ethernet/sun/sunqe.c", "qe_tx_timeout"],
["drivers/net/ethernet/sun/sunvnet.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.h", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/synopsys/dwc-xlgmac-net.c", "xlgmac_tx_timeout"],
["drivers/net/ethernet/ti/cpmac.c", "cpmac_tx_timeout"],
["drivers/net/ethernet/ti/cpsw.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.h", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/davinci_emac.c", "emac_dev_tx_timeout"],
["drivers/net/ethernet/ti/netcp_core.c", "netcp_ndo_tx_timeout"],
["drivers/net/ethernet/ti/tlan.c", "tlan_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.h", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_wireless.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/spider_net.c", "spider_net_tx_timeout"],
["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"],
["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"],
["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"],
["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"],
["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"],
["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"],
["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"],
["drivers/net/slip/slip.c", "sl_tx_timeout"],
["include/linux/usb/usbnet.h", "usbnet_tx_timeout"],
["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"],
["drivers/net/usb/catc.c", "catc_tx_timeout"],
["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"],
["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"],
["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"],
["drivers/net/usb/hso.c", "hso_net_tx_timeout"],
["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"],
["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"],
["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"],
["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"],
["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"],
["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"],
["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"],
["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"],
["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"],
["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"],
["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"],
["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"],
["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"],
["drivers/net/wan/cosa.c", "cosa_net_timeout"],
["drivers/net/wan/farsync.c", "fst_tx_timeout"],
["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"],
["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"],
["drivers/net/wan/x25_asy.c", "x25_asy_timeout"],
["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"],
["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"],
["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.h", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"],
["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"],
["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"],
["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"],
["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"],
["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"],
["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"],
["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"],
["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"],
["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"],
["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"],
["drivers/tty/synclink.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"],
["net/atm/lec.c", "lec_tx_timeout"],
["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"]
);
for my $p (@work) {
my @pair = @$p;
my $file = $pair[0];
my $func = $pair[1];
print STDERR $file , ": ", $func,"\n";
our @ARGV = ($file);
while (<ARGV>) {
if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) {
print STDERR "found $1+$2 in $file\n";
}
if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) {
print STDERR "$func found in $file\n";
}
print;
}
}
where the list of files and functions is simply from:
git grep ndo_tx_timeout, with manual addition of headers
in the rare cases where the function is from a header,
then manually changing the few places which actually
call ndo_tx_timeout.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Heiner Kallweit <hkallweit1@gmail.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Shannon Nelson <snelson@pensando.io>
Reviewed-by: Martin Habets <mhabets@solarflare.com>
changes from v9:
fixup a forward declaration
changes from v9:
more leftovers from v3 change
changes from v8:
fix up a missing direct call to timeout
rebased on net-next
changes from v7:
fixup leftovers from v3 change
changes from v6:
fix typo in rtl driver
changes from v5:
add missing files (allow any net device argument name)
changes from v4:
add a missing driver header
changes from v3:
change queue # to unsigned
Changes from v2:
added headers
Changes from v1:
Fix errors found by kbuild:
generalize the pattern a bit, to pick up
a couple of instances missed by the previous
version.
Signed-off-by: David S. Miller <davem@davemloft.net>
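For context, the conversion described above gives every .ndo_tx_timeout handler the two-argument shape shown in this short sketch. The foo_ names are made up for illustration only and are not part of this file:

static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	/* txqueue identifies the stuck TX queue reported by the watchdog */
	netdev_warn(dev, "TX timeout on queue %u\n", txqueue);
	/* a real driver would reset or restart its hardware queue here */
}
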
static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	/* Tell syslog we are hosed. */
	dev_dbg(&net->dev, "Tx timed out.\n");

	/* Update statistics */
	net->stats.tx_errors++;
}

static void gsm_mux_rx_netchar(struct gsm_dlci *dlci, const u8 *in_buf, int size)
{
	struct net_device *net = dlci->net;
	struct sk_buff *skb;
	struct gsm_mux_net *mux_net = netdev_priv(net);

	muxnet_get(mux_net);

	/* Allocate an sk_buff */
	skb = dev_alloc_skb(size + NET_IP_ALIGN);
	if (!skb) {
		/* We got no receive buffer. */
		net->stats.rx_dropped++;
		muxnet_put(mux_net);
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);
networking: introduce and use skb_put_data()
A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for
this.
An spatch similar to the one for skb_put_zero() converts many
of the places using it:
@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)
@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)
@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);
(again, manually post-processed to retain some comments)
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 12:29:20 +00:00
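	/*
	 * Note: per the conversion described above, the skb_put_data() call
	 * below is the one-step equivalent of the older open-coded pattern
	 * memcpy(skb_put(skb, size), in_buf, size).
	 */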
	skb_put_data(skb, in_buf, size);

	skb->dev = net;
	skb->protocol = htons(ETH_P_IP);

	/* Ship it off to the kernel */
	netif_rx(skb);

	/* update out statistics */
	net->stats.rx_packets++;
	net->stats.rx_bytes += size;
	muxnet_put(mux_net);
	return;
}

static void gsm_mux_net_init(struct net_device *net)
{
	static const struct net_device_ops gsm_netdev_ops = {
		.ndo_open = gsm_mux_net_open,
		.ndo_stop = gsm_mux_net_close,
		.ndo_start_xmit = gsm_mux_net_start_xmit,
		.ndo_tx_timeout = gsm_mux_net_tx_timeout,
	};

	net->netdev_ops = &gsm_netdev_ops;

	/* fill in the other fields */
	net->watchdog_timeo = GSM_NET_TX_TIMEOUT;
	net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	net->type = ARPHRD_NONE;
	net->tx_queue_len = 10;
}

/* caller holds the dlci mutex */
static void gsm_destroy_network(struct gsm_dlci *dlci)
{
	struct gsm_mux_net *mux_net;

	pr_debug("destroy network interface\n");
	if (!dlci->net)
		return;
	mux_net = netdev_priv(dlci->net);
	muxnet_put(mux_net);
}

/* caller holds the dlci mutex */
static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
{
	char *netname;
	int retval = 0;
	struct net_device *net;
	struct gsm_mux_net *mux_net;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* Already in a non tty mode */
	if (dlci->adaption > 2)
		return -EBUSY;

	if (nc->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	if (nc->adaption != 3 && nc->adaption != 4)
		return -EPROTONOSUPPORT;

	pr_debug("create network interface\n");

	netname = "gsm%d";
	if (nc->if_name[0] != '\0')
		netname = nc->if_name;
net: set name_assign_type in alloc_netdev()
Extend alloc_netdev{,_mq{,s}}() to take name_assign_type as argument, and convert
all users to pass NET_NAME_UNKNOWN.
Coccinelle patch:
@@
expression sizeof_priv, name, setup, txqs, rxqs, count;
@@
(
-alloc_netdev_mqs(sizeof_priv, name, setup, txqs, rxqs)
+alloc_netdev_mqs(sizeof_priv, name, NET_NAME_UNKNOWN, setup, txqs, rxqs)
|
-alloc_netdev_mq(sizeof_priv, name, setup, count)
+alloc_netdev_mq(sizeof_priv, name, NET_NAME_UNKNOWN, setup, count)
|
-alloc_netdev(sizeof_priv, name, setup)
+alloc_netdev(sizeof_priv, name, NET_NAME_UNKNOWN, setup)
)
v9: move comments here from the wrong commit
Signed-off-by: Tom Gundersen <teg@jklm.no>
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-07-14 14:37:24 +00:00
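	/*
	 * Note: after the conversion described above, alloc_netdev() takes
	 * four arguments: private data size, name template, name_assign_type
	 * (NET_NAME_UNKNOWN here) and the setup callback.
	 */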
	net = alloc_netdev(sizeof(struct gsm_mux_net), netname,
			   NET_NAME_UNKNOWN, gsm_mux_net_init);
	if (!net) {
		pr_err("alloc_netdev failed\n");
		return -ENOMEM;
	}
	net->mtu = dlci->mtu;
	net->min_mtu = MIN_MTU;
	net->max_mtu = dlci->mtu;
	mux_net = netdev_priv(net);
	mux_net->dlci = dlci;
	kref_init(&mux_net->ref);
	strscpy(nc->if_name, net->name); /* return net name */

	/* reconfigure dlci for network */
	dlci->prev_adaption = dlci->adaption;
	dlci->prev_data = dlci->data;
	dlci->adaption = nc->adaption;
	dlci->data = gsm_mux_rx_netchar;
	dlci->net = net;

	pr_debug("register netdev\n");
	retval = register_netdev(net);
	if (retval) {
		pr_err("network register fail %d\n", retval);
		dlci_net_free(dlci);
		return retval;
	}
	return net->ifindex;	/* return network index */
}
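
For reference, a minimal user-space sketch of exercising this path is shown below. It assumes a DLCI tty such as /dev/gsmtty1 is already open on a multiplexed link, that the caller has CAP_NET_ADMIN, and that the UAPI header linux/gsmmux.h provides struct gsm_netconfig, GSMIOC_ENABLE_NET and GSMIOC_DISABLE_NET as used by gsmtty_ioctl() further below; the device node name is only an example and error handling is kept minimal.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <netinet/in.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_IP */
#include <linux/gsmmux.h>	/* struct gsm_netconfig, GSMIOC_* */

int main(void)
{
	struct gsm_netconfig nc;
	int fd, ifindex;

	fd = open("/dev/gsmtty1", O_RDWR | O_NOCTTY);	/* example node name */
	if (fd < 0)
		return 1;

	memset(&nc, 0, sizeof(nc));
	nc.adaption = 3;		/* gsm_create_network() accepts 3 or 4 */
	nc.protocol = htons(ETH_P_IP);	/* only IP is supported */
	/* leaving if_name empty lets the kernel pick the "gsm%d" default */

	ifindex = ioctl(fd, GSMIOC_ENABLE_NET, &nc);	/* returns net->ifindex */
	if (ifindex < 0) {
		perror("GSMIOC_ENABLE_NET");
		close(fd);
		return 1;
	}
	printf("created %s (ifindex %d)\n", nc.if_name, ifindex);

	ioctl(fd, GSMIOC_DISABLE_NET);	/* tear the interface down again */
	close(fd);
	return 0;
}
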
/* Line discipline for real tty */
static struct tty_ldisc_ops tty_ldisc_packet = {
	.owner = THIS_MODULE,
	.num = N_GSM0710,
	.name = "n_gsm",
	.open = gsmld_open,
	.close = gsmld_close,
	.flush_buffer = gsmld_flush_buffer,
	.read = gsmld_read,
	.write = gsmld_write,
	.ioctl = gsmld_ioctl,
	.poll = gsmld_poll,
	.receive_buf = gsmld_receive_buf,
	.write_wakeup = gsmld_write_wakeup
};

/*
 * Virtual tty side
 */

/**
 * gsm_modem_upd_via_data - send modem bits via convergence layer
 * @dlci: channel
 * @brk: break signal
 *
 * Send an empty frame to signal mobile state changes and to transmit the
 * break signal for adaption 2.
 */
static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
{
	struct gsm_mux *gsm = dlci->gsm;
	unsigned long flags;

	if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
		return;

	spin_lock_irqsave(&gsm->tx_lock, flags);
	gsm_dlci_modem_output(gsm, dlci, brk);
	spin_unlock_irqrestore(&gsm->tx_lock, flags);
}

/**
 * gsm_modem_upd_via_msc - send modem bits via control frame
 * @dlci: channel
 * @brk: break signal
 */
static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk)
{
	u8 modembits[3];
	struct gsm_control *ctrl;
	int len = 2;

	if (dlci->gsm->encoding != GSM_BASIC_OPT)
		return 0;

	modembits[0] = (dlci->addr << 2) | 2 | EA;  /* DLCI, Valid, EA */
	if (!brk) {
		modembits[1] = (gsm_encode_modem(dlci) << 1) | EA;
	} else {
		modembits[1] = gsm_encode_modem(dlci) << 1;
		modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */
		len++;
	}
	ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len);
	if (ctrl == NULL)
		return -ENOMEM;
	return gsm_control_wait(dlci->gsm, ctrl);
}

/**
 * gsm_modem_update - send modem status line state
 * @dlci: channel
 * @brk: break signal
 */
static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
{
	if (dlci->gsm->dead)
		return -EL2HLT;
	if (dlci->adaption == 2) {
		/* Send convergence layer type 2 empty data frame. */
		gsm_modem_upd_via_data(dlci, brk);
		return 0;
	} else if (dlci->gsm->encoding == GSM_BASIC_OPT) {
		/* Send as MSC control message. */
		return gsm_modem_upd_via_msc(dlci, brk);
	}

	/* Modem status lines are not supported. */
	return -EPROTONOSUPPORT;
}

/**
 * gsm_wait_modem_change - wait for modem status line change
 * @dlci: channel
 * @mask: modem status line bits
 *
 * The function returns if:
 * - any given modem status line bit changed
 * - the wait event function got interrupted (e.g. by a signal)
 * - the underlying DLCI was closed
 * - the underlying ldisc device was removed
 */
static int gsm_wait_modem_change(struct gsm_dlci *dlci, u32 mask)
{
	struct gsm_mux *gsm = dlci->gsm;
	u32 old = dlci->modem_rx;
	int ret;

	ret = wait_event_interruptible(gsm->event, gsm->dead ||
				       dlci->state != DLCI_OPEN ||
				       (old ^ dlci->modem_rx) & mask);
	if (gsm->dead)
		return -ENODEV;
	if (dlci->state != DLCI_OPEN)
		return -EL2NSYNC;
	return ret;
}

static bool gsm_carrier_raised(struct tty_port *port)
{
	struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
	struct gsm_mux *gsm = dlci->gsm;

	/* Not yet open so no carrier info */
	if (dlci->state != DLCI_OPEN)
		return false;
	if (debug & DBG_CD_ON)
		return true;

	/*
	 * Basic mode with control channel in ADM mode may not respond
	 * to CMD_MSC at all and modem_rx is empty.
	 */
	if (gsm->encoding == GSM_BASIC_OPT &&
	    gsm->dlci[0]->mode == DLCI_MODE_ADM && !dlci->modem_rx)
		return true;

	return dlci->modem_rx & TIOCM_CD;
}

static void gsm_dtr_rts(struct tty_port *port, bool active)
{
	struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
	unsigned int modem_tx = dlci->modem_tx;

	if (active)
		modem_tx |= TIOCM_DTR | TIOCM_RTS;
	else
		modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
	if (modem_tx != dlci->modem_tx) {
		dlci->modem_tx = modem_tx;
		gsm_modem_update(dlci, 0);
	}
}

static const struct tty_port_operations gsm_port_ops = {
	.carrier_raised = gsm_carrier_raised,
	.dtr_rts = gsm_dtr_rts,
	.destruct = gsm_dlci_free,
};

static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct gsm_mux *gsm;
	struct gsm_dlci *dlci;
	unsigned int line = tty->index;
	unsigned int mux = mux_line_to_num(line);
	bool alloc = false;
	int ret;

	line = line & 0x3F;

	if (mux >= MAX_MUX)
		return -ENXIO;
	/* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */
	if (gsm_mux[mux] == NULL)
		return -EUNATCH;
	if (line == 0 || line > 61)	/* 62/63 reserved */
		return -ECHRNG;
	gsm = gsm_mux[mux];
	if (gsm->dead)
		return -EL2HLT;
	/* If DLCI 0 is not yet fully open return an error.
	   This is ok from a locking
	   perspective as we don't have to worry about this
	   if DLCI0 is lost */
n_gsm: race between ld close and gsmtty open
ttyA has ld associated to n_gsm, when ttyA is closing, it triggers
to release gsmttyB's ld data dlci[B], then race would happen if gsmttyB
is opening in parallel.
(Note: This patch set differs from previous set in that it uses mutex
instead of spin lock to avoid race, so that it avoids sleeping in automic
context)
Here are race cases we found recently in test:
CASE #1
====================================================================
releasing dlci[B] race with gsmtty_install(gsmttyB), then panic
in gsmtty_open(gsmttyB), as below:
tty_release(ttyA) tty_open(gsmttyB)
| |
----- gsmtty_install(gsmttyB)
| |
----- gsm_dlci_alloc(gsmttyB) => alloc dlci[B]
tty_ldisc_release(ttyA) -----
| |
gsm_dlci_release(dlci[B]) -----
| |
gsm_dlci_free(dlci[B]) -----
| |
----- gsmtty_open(gsmttyB)
gsmtty_open()
{
struct gsm_dlci *dlci = tty->driver_data; => here it uses dlci[B]
...
}
In gsmtty_open(gsmttyA), it uses dlci[B] which was release, so hit a panic.
=====================================================================
CASE #2
=====================================================================
releasing dlci[0] race with gsmtty_install(gsmttyB), then panic
in gsmtty_open(), as below:
tty_release(ttyA) tty_open(gsmttyB)
| |
----- gsmtty_install(gsmttyB)
| |
----- gsm_dlci_alloc(gsmttyB) => alloc dlci[B]
| |
----- gsmtty_open(gsmttyB) fail
| |
----- tty_release(gsmttyB)
| |
----- gsmtty_close(gsmttyB)
| |
----- gsmtty_detach_dlci(dlci[B])
| |
----- dlci_put(dlci[B])
| |
tty_ldisc_release(ttyA) -----
| |
gsm_dlci_release(dlci[0]) -----
| |
gsm_dlci_free(dlci[0]) -----
| |
----- dlci_put(dlci[0])
In gsmtty_detach_dlci(dlci[B]), it tries to use dlci[0] which was released,
then hit panic.
=====================================================================
IMHO, n_gsm tty operations would refer released ldisc, as long as
gsm_dlci_release() has chance to release ldisc data when some gsmtty operations
are ongoing..
This patch is try to avoid it by:
1) in n_gsm driver, use a global gsm mutex lock to avoid gsm_dlci_release() run in
parallel with gsmtty_install();
2) Increase dlci's ref count in gsmtty_install() instead of in gsmtty_open(), the
purpose is to prevent gsm_dlci_release() releasing dlci after gsmtty_install()
allocats dlci but before gsmtty_open increases dlci's ref count;
3) Decrease dlci's ref count in gsmtty_remove(), a tty framework API, this is the
opposite process of step 2).
Signed-off-by: Chao Bi <chao.bi@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-11-26 04:09:39 +00:00
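	/*
	 * Lifetime rules (see the commit message above): gsm->mutex keeps
	 * gsm_dlci_release() from running in parallel with this install,
	 * the DLCI reference is taken here rather than in gsmtty_open(),
	 * and the matching dlci_put() is done in gsmtty_remove().
	 */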
	mutex_lock(&gsm->mutex);
	if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) {
		mutex_unlock(&gsm->mutex);
		return -EL2NSYNC;
	}
	dlci = gsm->dlci[line];
	if (dlci == NULL) {
		alloc = true;
		dlci = gsm_dlci_alloc(gsm, line);
	}
	if (dlci == NULL) {
		mutex_unlock(&gsm->mutex);
		return -ENOMEM;
	}
	ret = tty_port_install(&dlci->port, driver, tty);
	if (ret) {
		if (alloc)
			dlci_put(dlci);
		mutex_unlock(&gsm->mutex);
		return ret;
	}

	dlci_get(dlci);
	dlci_get(gsm->dlci[0]);
	mux_get(gsm);
	tty->driver_data = dlci;
	mutex_unlock(&gsm->mutex);

	return 0;
}

static int gsmtty_open(struct tty_struct *tty, struct file *filp)
{
	struct gsm_dlci *dlci = tty->driver_data;
	struct tty_port *port = &dlci->port;

	port->count++;
	tty_port_tty_set(port, tty);

	dlci->modem_rx = 0;
	/* We could in theory open and close before we wait - eg if we get
	   a DM straight back. This is ok as that will have caused a hangup */
	tty_port_set_initialized(port, true);
	/* Start sending off SABM messages */
	if (!dlci->gsm->wait_config) {
		/* Start sending off SABM messages */
		if (dlci->gsm->initiator)
			gsm_dlci_begin_open(dlci);
		else
			gsm_dlci_set_opening(dlci);
	} else {
		gsm_dlci_set_wait_config(dlci);
	}
	/* And wait for virtual carrier */
	return tty_port_block_til_ready(port, tty, filp);
}

static void gsmtty_close(struct tty_struct *tty, struct file *filp)
{
	struct gsm_dlci *dlci = tty->driver_data;

	if (dlci == NULL)
		return;
	if (dlci->state == DLCI_CLOSED)
		return;
	mutex_lock(&dlci->mutex);
	gsm_destroy_network(dlci);
	mutex_unlock(&dlci->mutex);
	if (tty_port_close_start(&dlci->port, tty, filp) == 0)
		return;
	gsm_dlci_begin_close(dlci);
	if (tty_port_initialized(&dlci->port) && C_HUPCL(tty))
		tty_port_lower_dtr_rts(&dlci->port);
	tty_port_close_end(&dlci->port, tty);
	tty_port_tty_set(&dlci->port, NULL);
	return;
}

static void gsmtty_hangup(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;

	if (dlci->state == DLCI_CLOSED)
		return;
	tty_port_hangup(&dlci->port);
	gsm_dlci_begin_close(dlci);
}

static ssize_t gsmtty_write(struct tty_struct *tty, const u8 *buf, size_t len)
{
	int sent;
	struct gsm_dlci *dlci = tty->driver_data;

	if (dlci->state == DLCI_CLOSED)
		return -EINVAL;
	/* Stuff the bytes into the fifo queue */
	sent = kfifo_in_locked(&dlci->fifo, buf, len, &dlci->lock);
	/* Need to kick the channel */
	gsm_dlci_data_kick(dlci);
	return sent;
}

static unsigned int gsmtty_write_room(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;

	if (dlci->state == DLCI_CLOSED)
		return 0;
	return kfifo_avail(&dlci->fifo);
}

static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;

	if (dlci->state == DLCI_CLOSED)
		return 0;
	return kfifo_len(&dlci->fifo);
}

static void gsmtty_flush_buffer(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	unsigned long flags;

	if (dlci->state == DLCI_CLOSED)
		return;
	/* Caution needed: If we implement reliable transport classes
	   then the data being transmitted can't simply be junked once
	   it has first hit the stack. Until then we can just blow it
	   away */
	spin_lock_irqsave(&dlci->lock, flags);
	kfifo_reset(&dlci->fifo);
	spin_unlock_irqrestore(&dlci->lock, flags);
	/* Need to unhook this DLCI from the transmit queue logic */
}

static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout)
{
	/* The FIFO handles the queue so the kernel will do the right
	   thing waiting on chars_in_buffer before calling us. No work
	   to do here */
}

static int gsmtty_tiocmget(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;

	if (dlci->state == DLCI_CLOSED)
		return -EINVAL;
	return dlci->modem_rx;
}

static int gsmtty_tiocmset(struct tty_struct *tty,
			   unsigned int set, unsigned int clear)
{
	struct gsm_dlci *dlci = tty->driver_data;
	unsigned int modem_tx = dlci->modem_tx;

	if (dlci->state == DLCI_CLOSED)
		return -EINVAL;
	modem_tx &= ~clear;
	modem_tx |= set;

	if (modem_tx != dlci->modem_tx) {
		dlci->modem_tx = modem_tx;
		return gsm_modem_update(dlci, 0);
	}
	return 0;
}

static int gsmtty_ioctl(struct tty_struct *tty,
			unsigned int cmd, unsigned long arg)
{
	struct gsm_dlci *dlci = tty->driver_data;
	struct gsm_netconfig nc;
	struct gsm_dlci_config dc;
	int index;

	if (dlci->state == DLCI_CLOSED)
		return -EINVAL;
	switch (cmd) {
	case GSMIOC_ENABLE_NET:
		if (copy_from_user(&nc, (void __user *)arg, sizeof(nc)))
			return -EFAULT;
		nc.if_name[IFNAMSIZ-1] = '\0';
		/* return net interface index or error code */
		mutex_lock(&dlci->mutex);
		index = gsm_create_network(dlci, &nc);
		mutex_unlock(&dlci->mutex);
		if (copy_to_user((void __user *)arg, &nc, sizeof(nc)))
			return -EFAULT;
		return index;
	case GSMIOC_DISABLE_NET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mutex_lock(&dlci->mutex);
		gsm_destroy_network(dlci);
		mutex_unlock(&dlci->mutex);
		return 0;
	case GSMIOC_GETCONF_DLCI:
		if (copy_from_user(&dc, (void __user *)arg, sizeof(dc)))
			return -EFAULT;
		if (dc.channel != dlci->addr)
			return -EPERM;
		gsm_dlci_copy_config_values(dlci, &dc);
|
|
|
|
|
if (copy_to_user((void __user *)arg, &dc, sizeof(dc)))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
return 0;
|
|
|
|
|
case GSMIOC_SETCONF_DLCI:
|
|
|
|
|
if (copy_from_user(&dc, (void __user *)arg, sizeof(dc)))
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
if (dc.channel >= NUM_DLCI)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
if (dc.channel != 0 && dc.channel != dlci->addr)
|
|
|
|
|
return -EPERM;
|
|
|
|
|
return gsm_dlci_config(dlci, &dc, 1);
|
2023-02-06 11:46:06 +00:00
|
|
|
|
case TIOCMIWAIT:
|
|
|
|
|
return gsm_wait_modem_change(dlci, (u32)arg);
|
2011-06-16 21:20:12 +00:00
|
|
|
|
default:
|
|
|
|
|
return -ENOIOCTLCMD;
|
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
2022-08-16 11:57:39 +00:00
|
|
|
|
static void gsmtty_set_termios(struct tty_struct *tty,
|
|
|
|
|
const struct ktermios *old)
|
2010-03-26 11:32:54 +00:00
|
|
|
|
{
|
2013-01-30 10:44:50 +00:00
|
|
|
|
struct gsm_dlci *dlci = tty->driver_data;
|
|
|
|
|
if (dlci->state == DLCI_CLOSED)
|
|
|
|
|
return;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
	/* For the moment it's fixed. In actual fact the speed information
|
|
|
|
|
	   for the virtual channel can be propagated in both directions by
|
|
|
|
|
the RPN control message. This however rapidly gets nasty as we
|
|
|
|
|
then have to remap modem signals each way according to whether
|
|
|
|
|
our virtual cable is null modem etc .. */
|
2012-07-14 14:31:47 +00:00
|
|
|
|
tty_termios_copy_hw(&tty->termios, old);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void gsmtty_throttle(struct tty_struct *tty)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_dlci *dlci = tty->driver_data;
|
2013-01-30 10:44:50 +00:00
|
|
|
|
if (dlci->state == DLCI_CLOSED)
|
|
|
|
|
return;
|
2016-01-11 04:36:15 +00:00
|
|
|
|
if (C_CRTSCTS(tty))
|
2022-02-18 07:31:21 +00:00
|
|
|
|
dlci->modem_tx &= ~TIOCM_RTS;
|
2020-02-19 08:49:47 +00:00
|
|
|
|
dlci->throttled = true;
|
2022-02-18 07:31:21 +00:00
|
|
|
|
/* Send an MSC with RTS cleared */
|
2022-04-22 07:10:24 +00:00
|
|
|
|
gsm_modem_update(dlci, 0);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void gsmtty_unthrottle(struct tty_struct *tty)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_dlci *dlci = tty->driver_data;
|
2013-01-30 10:44:50 +00:00
|
|
|
|
if (dlci->state == DLCI_CLOSED)
|
|
|
|
|
return;
|
2016-01-11 04:36:15 +00:00
|
|
|
|
if (C_CRTSCTS(tty))
|
2022-02-18 07:31:21 +00:00
|
|
|
|
dlci->modem_tx |= TIOCM_RTS;
|
2020-02-19 08:49:47 +00:00
|
|
|
|
dlci->throttled = false;
|
2022-02-18 07:31:21 +00:00
|
|
|
|
/* Send an MSC with RTS set */
|
2022-04-22 07:10:24 +00:00
|
|
|
|
gsm_modem_update(dlci, 0);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int gsmtty_break_ctl(struct tty_struct *tty, int state)
|
|
|
|
|
{
|
|
|
|
|
struct gsm_dlci *dlci = tty->driver_data;
|
|
|
|
|
int encode = 0; /* Off */
|
2013-01-30 10:44:50 +00:00
|
|
|
|
if (dlci->state == DLCI_CLOSED)
|
|
|
|
|
return -EINVAL;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
|
|
|
|
if (state == -1) /* "On indefinitely" - we can't encode this
|
|
|
|
|
properly */
|
|
|
|
|
encode = 0x0F;
|
|
|
|
|
else if (state > 0) {
|
|
|
|
|
encode = state / 200; /* mS to encoding */
|
|
|
|
|
if (encode > 0x0F)
|
|
|
|
|
encode = 0x0F; /* Best effort */
|
|
|
|
|
}
|
2022-04-22 07:10:24 +00:00
|
|
|
|
return gsm_modem_update(dlci, encode);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
2015-03-28 02:42:56 +00:00
|
|
|
|
static void gsmtty_cleanup(struct tty_struct *tty)
|
n_gsm: race between ld close and gsmtty open
ttyA has an ldisc associated with n_gsm. When ttyA is closing, it triggers the
release of gsmttyB's ldisc data dlci[B], and a race can happen if gsmttyB is
being opened in parallel.
(Note: this patch set differs from the previous set in that it uses a mutex
instead of a spin lock to avoid the race, so that it avoids sleeping in atomic
context.)
Here are the race cases we found recently in testing:
CASE #1
====================================================================
releasing dlci[B] races with gsmtty_install(gsmttyB), and then a panic hits
in gsmtty_open(gsmttyB), as below:
tty_release(ttyA) tty_open(gsmttyB)
| |
----- gsmtty_install(gsmttyB)
| |
----- gsm_dlci_alloc(gsmttyB) => alloc dlci[B]
tty_ldisc_release(ttyA) -----
| |
gsm_dlci_release(dlci[B]) -----
| |
gsm_dlci_free(dlci[B]) -----
| |
----- gsmtty_open(gsmttyB)
gsmtty_open()
{
struct gsm_dlci *dlci = tty->driver_data; => here it uses dlci[B]
...
}
In gsmtty_open(gsmttyB), it uses dlci[B], which was already released, so it hits a panic.
=====================================================================
CASE #2
=====================================================================
releasing dlci[0] races with gsmtty_install(gsmttyB), and then a panic hits
in gsmtty_open(), as below:
tty_release(ttyA) tty_open(gsmttyB)
| |
----- gsmtty_install(gsmttyB)
| |
----- gsm_dlci_alloc(gsmttyB) => alloc dlci[B]
| |
----- gsmtty_open(gsmttyB) fail
| |
----- tty_release(gsmttyB)
| |
----- gsmtty_close(gsmttyB)
| |
----- gsmtty_detach_dlci(dlci[B])
| |
----- dlci_put(dlci[B])
| |
tty_ldisc_release(ttyA) -----
| |
gsm_dlci_release(dlci[0]) -----
| |
gsm_dlci_free(dlci[0]) -----
| |
----- dlci_put(dlci[0])
In gsmtty_detach_dlci(dlci[B]), it tries to use dlci[0], which was already
released, and then hits a panic.
=====================================================================
IMHO, n_gsm tty operations can end up referencing a released ldisc as long as
gsm_dlci_release() has a chance to release the ldisc data while some gsmtty
operations are still ongoing.
This patch tries to avoid that by:
1) In the n_gsm driver, using a global gsm mutex lock so that gsm_dlci_release()
cannot run in parallel with gsmtty_install() (a user-space sketch of this locking
follows after this list);
2) Increasing the dlci's ref count in gsmtty_install() instead of in gsmtty_open(),
to prevent gsm_dlci_release() from releasing the dlci after gsmtty_install()
allocates it but before gsmtty_open() increases its ref count;
3) Decreasing the dlci's ref count in gsmtty_remove(), a tty framework API; this is
the reverse of step 2).
Signed-off-by: Chao Bi <chao.bi@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
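To illustrate step 1) above, here is a minimal user-space sketch of how a single
global lock serializes the ldisc-release path against the tty-install path. It
uses a pthread mutex rather than the kernel gsm mutex, and the names
ldisc_release(), tty_install() and dlci0 are hypothetical stand-ins, not the
functions in n_gsm.c.
/*
 * Toy model of fix step 1): one global mutex makes the release path and
 * the install path mutually exclusive, so install either sees a live
 * dlci[0] or a cleanly cleared slot, never a half-freed one. This is a
 * user-space illustration only; the names are invented.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t gsm_lock = PTHREAD_MUTEX_INITIALIZER;
static int *dlci0;	/* stands in for gsm->dlci[0] */

static void *ldisc_release(void *unused)
{
	pthread_mutex_lock(&gsm_lock);
	free(dlci0);		/* tear down the control channel ... */
	dlci0 = NULL;		/* ... and make that visible to install */
	pthread_mutex_unlock(&gsm_lock);
	return NULL;
}

static void *tty_install(void *unused)
{
	pthread_mutex_lock(&gsm_lock);
	if (dlci0)
		printf("install: dlci[0] holds %d\n", *dlci0);
	else
		printf("install: mux already gone, bail out\n");
	pthread_mutex_unlock(&gsm_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	dlci0 = malloc(sizeof(*dlci0));
	*dlci0 = 42;
	pthread_create(&a, NULL, ldisc_release, NULL);
	pthread_create(&b, NULL, tty_install, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
Built with "gcc -pthread", either interleaving is safe: install never touches
memory that release has already freed.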
2013-11-26 04:09:39 +00:00
|
|
|
|
{
|
|
|
|
|
struct gsm_dlci *dlci = tty->driver_data;
|
|
|
|
|
struct gsm_mux *gsm = dlci->gsm;
|
|
|
|
|
|
|
|
|
|
dlci_put(dlci);
|
|
|
|
|
dlci_put(gsm->dlci[0]);
|
|
|
|
|
mux_put(gsm);
|
|
|
|
|
}
|
2010-03-26 11:32:54 +00:00
|
|
|
|
|
|
|
|
|
/* Virtual ttys for the demux */
|
|
|
|
|
static const struct tty_operations gsmtty_ops = {
|
2012-08-07 19:47:28 +00:00
|
|
|
|
.install = gsmtty_install,
|
2010-03-26 11:32:54 +00:00
|
|
|
|
.open = gsmtty_open,
|
|
|
|
|
.close = gsmtty_close,
|
|
|
|
|
.write = gsmtty_write,
|
|
|
|
|
.write_room = gsmtty_write_room,
|
|
|
|
|
.chars_in_buffer = gsmtty_chars_in_buffer,
|
|
|
|
|
.flush_buffer = gsmtty_flush_buffer,
|
|
|
|
|
.ioctl = gsmtty_ioctl,
|
|
|
|
|
.throttle = gsmtty_throttle,
|
|
|
|
|
.unthrottle = gsmtty_unthrottle,
|
|
|
|
|
.set_termios = gsmtty_set_termios,
|
|
|
|
|
.hangup = gsmtty_hangup,
|
|
|
|
|
.wait_until_sent = gsmtty_wait_until_sent,
|
|
|
|
|
.tiocmget = gsmtty_tiocmget,
|
|
|
|
|
.tiocmset = gsmtty_tiocmset,
|
|
|
|
|
.break_ctl = gsmtty_break_ctl,
|
2015-03-28 02:42:56 +00:00
|
|
|
|
.cleanup = gsmtty_cleanup,
|
2010-03-26 11:32:54 +00:00
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static int __init gsm_init(void)
|
|
|
|
|
{
|
|
|
|
|
/* Fill in our line protocol discipline, and register it */
|
2021-05-05 09:19:07 +00:00
|
|
|
|
int status = tty_register_ldisc(&tty_ldisc_packet);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
if (status != 0) {
|
2010-11-04 15:17:27 +00:00
|
|
|
|
pr_err("n_gsm: can't register line discipline (err = %d)\n",
|
|
|
|
|
status);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return status;
|
|
|
|
|
}
|
|
|
|
|
|
2022-08-31 07:37:56 +00:00
|
|
|
|
gsm_tty_driver = tty_alloc_driver(GSM_TTY_MINORS, TTY_DRIVER_REAL_RAW |
|
2021-07-23 07:43:13 +00:00
|
|
|
|
TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
|
|
|
|
|
if (IS_ERR(gsm_tty_driver)) {
|
2010-11-04 15:17:27 +00:00
|
|
|
|
pr_err("gsm_init: tty allocation failed.\n");
|
2021-07-23 07:43:13 +00:00
|
|
|
|
status = PTR_ERR(gsm_tty_driver);
|
2021-05-05 09:19:08 +00:00
|
|
|
|
goto err_unreg_ldisc;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
gsm_tty_driver->driver_name = "gsmtty";
|
|
|
|
|
gsm_tty_driver->name = "gsmtty";
|
|
|
|
|
gsm_tty_driver->major = 0; /* Dynamic */
|
|
|
|
|
gsm_tty_driver->minor_start = 0;
|
|
|
|
|
gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
|
|
|
|
|
gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
|
|
|
|
|
gsm_tty_driver->init_termios = tty_std_termios;
|
|
|
|
|
/* Fixme */
|
|
|
|
|
gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
|
|
|
|
|
tty_set_operations(gsm_tty_driver, &gsmtty_ops);
|
|
|
|
|
|
|
|
|
|
if (tty_register_driver(gsm_tty_driver)) {
|
2010-11-04 15:17:27 +00:00
|
|
|
|
pr_err("gsm_init: tty registration failed.\n");
|
2021-05-05 09:19:08 +00:00
|
|
|
|
status = -EBUSY;
|
|
|
|
|
goto err_put_driver;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
2010-11-04 15:17:27 +00:00
|
|
|
|
pr_debug("gsm_init: loaded as %d,%d.\n",
|
|
|
|
|
gsm_tty_driver->major, gsm_tty_driver->minor_start);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
return 0;
|
2021-05-05 09:19:08 +00:00
|
|
|
|
err_put_driver:
|
2021-07-23 07:43:16 +00:00
|
|
|
|
tty_driver_kref_put(gsm_tty_driver);
|
2021-05-05 09:19:08 +00:00
|
|
|
|
err_unreg_ldisc:
|
2021-05-05 09:19:09 +00:00
|
|
|
|
tty_unregister_ldisc(&tty_ldisc_packet);
|
2021-05-05 09:19:08 +00:00
|
|
|
|
return status;
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static void __exit gsm_exit(void)
|
|
|
|
|
{
|
2021-05-05 09:19:11 +00:00
|
|
|
|
tty_unregister_ldisc(&tty_ldisc_packet);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
tty_unregister_driver(gsm_tty_driver);
|
2021-07-23 07:43:16 +00:00
|
|
|
|
tty_driver_kref_put(gsm_tty_driver);
|
2010-03-26 11:32:54 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
module_init(gsm_init);
|
|
|
|
|
module_exit(gsm_exit);
|
|
|
|
|
|
|
|
|
|
|
2024-06-07 23:10:20 +00:00
|
|
|
|
MODULE_DESCRIPTION("GSM 0710 tty multiplexor");
|
2010-03-26 11:32:54 +00:00
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
|
MODULE_ALIAS_LDISC(N_GSM0710);
|