// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO)
 *
 * Copyright (C) 2002 - 2011 Paul Mundt
 * Copyright (C) 2015 Glider bvba
 * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007).
 *
 * based off of the old drivers/char/sh-sci.c by:
 *
 *  Copyright (C) 1999, 2000 Niibe Yutaka
 *  Copyright (C) 2000 Sugioka Toshinobu
 *  Modified to support multiple serial ports. Stuart Menefy (May 2000).
 *  Modified to support SecureEdge. David McCullough (2002)
 *  Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003).
 *  Removed SH7300 support (Jul 2007).
 */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif

#undef DEBUG

#include <linux/clk.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/timer.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
#endif

#include "serial_mctrl_gpio.h"
#include "sh-sci.h"

/* Offsets into the sci_port->irqs array */
enum {
	SCIx_ERI_IRQ,
	SCIx_RXI_IRQ,
	SCIx_TXI_IRQ,
	SCIx_BRI_IRQ,
	SCIx_NR_IRQS,

	SCIx_MUX_IRQ = SCIx_NR_IRQS, /* special case */
};

#define SCIx_IRQ_IS_MUXED(port) \
	((port)->irqs[SCIx_ERI_IRQ] == \
	 (port)->irqs[SCIx_RXI_IRQ]) || \
	((port)->irqs[SCIx_ERI_IRQ] && \
	 ((port)->irqs[SCIx_RXI_IRQ] < 0))
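
/*
 * Reading the macro above: a port is treated as having a muxed IRQ either
 * when ERI and RXI were registered with the same IRQ number, or when only
 * ERI is valid and RXI is negative. Both patterns describe a single IRQ
 * line servicing all of the port's events.
 */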

enum SCI_CLKS {
	SCI_FCK, /* Functional Clock */
	SCI_SCK, /* Optional External Clock */
	SCI_BRG_INT, /* Optional BRG Internal Clock Source */
	SCI_SCIF_CLK, /* Optional BRG External Clock Source */
	SCI_NUM_CLKS
};

/* Bit x set means sampling rate x + 1 is supported */
#define SCI_SR(x) BIT((x) - 1)
#define SCI_SR_RANGE(x, y) GENMASK((y) - 1, (x) - 1)

#define SCI_SR_SCIFAB SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \
		SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \
		SCI_SR(19) | SCI_SR(27)

#define min_sr(_port) ffs((_port)->sampling_rate_mask)
#define max_sr(_port) fls((_port)->sampling_rate_mask)

/* Iterate over all supported sampling rates, from high to low */
#define for_each_sr(_sr, _port) \
	for ((_sr) = max_sr(_port); (_sr) >= min_sr(_port); (_sr)--) \
		if ((_port)->sampling_rate_mask & SCI_SR((_sr)))
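
/*
 * For illustration: with sampling_rate_mask == SCI_SR_SCIFAB, max_sr() is
 * 27 and min_sr() is 5, so for_each_sr() visits 27, 19, 17, 16, 13, 11, 7
 * and 5 in that order, skipping any rate whose bit is not set. A typical
 * consumer (a sketch, not this driver's actual code) walks the rates while
 * searching for the divisor with the least baud-rate error:
 *
 *	unsigned int sr;
 *
 *	for_each_sr(sr, s) {
 *		... compute and compare the divisor error for this rate ...
 *	}
 */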

struct plat_sci_reg {
	u8 offset, size;
};

struct sci_port_params {
	const struct plat_sci_reg regs[SCIx_NR_REGS];
	unsigned int fifosize;
	unsigned int overrun_reg;
	unsigned int overrun_mask;
	unsigned int sampling_rate_mask;
	unsigned int error_mask;
	unsigned int error_clear;
};

struct sci_port {
	struct uart_port port;

	/* Platform configuration */
	const struct sci_port_params *params;
	const struct plat_sci_port *cfg;
	unsigned int sampling_rate_mask;
	resource_size_t reg_size;
	struct mctrl_gpios *gpios;

	/* Clocks */
	struct clk *clks[SCI_NUM_CLKS];
	unsigned long clk_rates[SCI_NUM_CLKS];

	int irqs[SCIx_NR_IRQS];
	char *irqstr[SCIx_NR_IRQS];

	struct dma_chan *chan_tx;
	struct dma_chan *chan_rx;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	dma_cookie_t cookie_tx;
	dma_cookie_t cookie_rx[2];
	dma_cookie_t active_rx;
	dma_addr_t tx_dma_addr;
	unsigned int tx_dma_len;
	struct scatterlist sg_rx[2];
	void *rx_buf[2];
	size_t buf_len_rx;
	struct work_struct work_tx;
	struct timer_list rx_timer;
	unsigned int rx_timeout;
#endif
	unsigned int rx_frame;
	int rx_trigger;
	struct timer_list rx_fifo_timer;
	int rx_fifo_timeout;
	u16 hscif_tot;

	bool has_rtscts;
	bool autorts;
};

#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS

static struct sci_port sci_ports[SCI_NPORTS];
static struct uart_driver sci_uart_driver;

static inline struct sci_port *
to_sci_port(struct uart_port *uart)
{
	return container_of(uart, struct sci_port, port);
}

static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
	/*
	 * Common SCI definitions, dependent on the port's regshift
	 * value.
	 */
	[SCIx_SCI_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 8 },
			[SCBRR] = { 0x01, 8 },
			[SCSCR] = { 0x02, 8 },
			[SCxTDR] = { 0x03, 8 },
			[SCxSR] = { 0x04, 8 },
			[SCxRDR] = { 0x05, 8 },
		},
		.fifosize = 1,
		.overrun_reg = SCxSR,
		.overrun_mask = SCI_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER,
		.error_clear = SCI_ERROR_CLEAR & ~SCI_ORER,
	},

	/*
	 * Common definitions for legacy IrDA ports.
	 */
	[SCIx_IRDA_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 8 },
			[SCBRR] = { 0x02, 8 },
			[SCSCR] = { 0x04, 8 },
			[SCxTDR] = { 0x06, 8 },
			[SCxSR] = { 0x08, 16 },
			[SCxRDR] = { 0x0a, 8 },
			[SCFCR] = { 0x0c, 8 },
			[SCFDR] = { 0x0e, 16 },
		},
		.fifosize = 1,
		.overrun_reg = SCxSR,
		.overrun_mask = SCI_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCI_DEFAULT_ERROR_MASK | SCI_ORER,
		.error_clear = SCI_ERROR_CLEAR & ~SCI_ORER,
	},

	/*
	 * Common SCIFA definitions.
	 */
	[SCIx_SCIFA_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x20, 8 },
			[SCxSR] = { 0x14, 16 },
			[SCxRDR] = { 0x24, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCFDR] = { 0x1c, 16 },
			[SCPCR] = { 0x30, 16 },
			[SCPDR] = { 0x34, 16 },
		},
		.fifosize = 64,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR_SCIFAB,
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},

	/*
	 * Common SCIFB definitions.
	 */
	[SCIx_SCIFB_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x40, 8 },
			[SCxSR] = { 0x14, 16 },
			[SCxRDR] = { 0x60, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCTFDR] = { 0x38, 16 },
			[SCRFDR] = { 0x3c, 16 },
			[SCPCR] = { 0x30, 16 },
			[SCPDR] = { 0x34, 16 },
		},
		.fifosize = 256,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR_SCIFAB,
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},

	/*
	 * Common SH-2(A) SCIF definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH2_SCIF_FIFODATA_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x0c, 8 },
			[SCxSR] = { 0x10, 16 },
			[SCxRDR] = { 0x14, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCFDR] = { 0x1c, 16 },
			[SCSPTR] = { 0x20, 16 },
			[SCLSR] = { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-3 SCIF definitions.
	 */
	[SCIx_SH3_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 8 },
			[SCBRR] = { 0x02, 8 },
			[SCSCR] = { 0x04, 8 },
			[SCxTDR] = { 0x06, 8 },
			[SCxSR] = { 0x08, 16 },
			[SCxRDR] = { 0x0a, 8 },
			[SCFCR] = { 0x0c, 8 },
			[SCFDR] = { 0x0e, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions.
	 */
	[SCIx_SH4_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x0c, 8 },
			[SCxSR] = { 0x10, 16 },
			[SCxRDR] = { 0x14, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCFDR] = { 0x1c, 16 },
			[SCSPTR] = { 0x20, 16 },
			[SCLSR] = { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SCIF definitions for ports with a Baud Rate Generator for
	 * External Clock (BRG).
	 */
	[SCIx_SH4_SCIF_BRG_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x0c, 8 },
			[SCxSR] = { 0x10, 16 },
			[SCxRDR] = { 0x14, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCFDR] = { 0x1c, 16 },
			[SCSPTR] = { 0x20, 16 },
			[SCLSR] = { 0x24, 16 },
			[SCDL] = { 0x30, 16 },
			[SCCKS] = { 0x34, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common HSCIF definitions.
	 */
	[SCIx_HSCIF_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x0c, 8 },
			[SCxSR] = { 0x10, 16 },
			[SCxRDR] = { 0x14, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCFDR] = { 0x1c, 16 },
			[SCSPTR] = { 0x20, 16 },
			[SCLSR] = { 0x24, 16 },
			[HSSRR] = { 0x40, 16 },
			[SCDL] = { 0x30, 16 },
			[SCCKS] = { 0x34, 16 },
			[HSRTRGR] = { 0x54, 16 },
			[HSTTRGR] = { 0x58, 16 },
		},
		.fifosize = 128,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR_RANGE(8, 32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports without an SCSPTR
	 * register.
	 */
	[SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x0c, 8 },
			[SCxSR] = { 0x10, 16 },
			[SCxRDR] = { 0x14, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCFDR] = { 0x1c, 16 },
			[SCLSR] = { 0x24, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * Common SH-4(A) SCIF(B) definitions for ports with FIFO data
	 * count registers.
	 */
	[SCIx_SH4_SCIF_FIFODATA_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x0c, 8 },
			[SCxSR] = { 0x10, 16 },
			[SCxRDR] = { 0x14, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCFDR] = { 0x1c, 16 },
			[SCTFDR] = { 0x1c, 16 }, /* aliased to SCFDR */
			[SCRFDR] = { 0x20, 16 },
			[SCSPTR] = { 0x24, 16 },
			[SCLSR] = { 0x28, 16 },
		},
		.fifosize = 16,
		.overrun_reg = SCLSR,
		.overrun_mask = SCLSR_ORER,
		.sampling_rate_mask = SCI_SR(32),
		.error_mask = SCIF_DEFAULT_ERROR_MASK,
		.error_clear = SCIF_ERROR_CLEAR,
	},

	/*
	 * SH7705-style SCIF(B) ports, lacking both SCSPTR and SCLSR
	 * registers.
	 */
	[SCIx_SH7705_SCIF_REGTYPE] = {
		.regs = {
			[SCSMR] = { 0x00, 16 },
			[SCBRR] = { 0x04, 8 },
			[SCSCR] = { 0x08, 16 },
			[SCxTDR] = { 0x20, 8 },
			[SCxSR] = { 0x14, 16 },
			[SCxRDR] = { 0x24, 8 },
			[SCFCR] = { 0x18, 16 },
			[SCFDR] = { 0x1c, 16 },
		},
		.fifosize = 64,
		.overrun_reg = SCxSR,
		.overrun_mask = SCIFA_ORER,
		.sampling_rate_mask = SCI_SR(16),
		.error_mask = SCIF_DEFAULT_ERROR_MASK | SCIFA_ORER,
		.error_clear = SCIF_ERROR_CLEAR & ~SCIFA_ORER,
	},
};

#define sci_getreg(up, offset) (&to_sci_port(up)->params->regs[offset])

/*
 * The "offset" here is rather misleading, in that it refers to an enum
 * value relative to the port mapping rather than the fixed offset
 * itself, which needs to be manually retrieved from the platform's
 * register map for the given port.
 */
static unsigned int sci_serial_in(struct uart_port *p, int offset)
{
	const struct plat_sci_reg *reg = sci_getreg(p, offset);

	if (reg->size == 8)
		return ioread8(p->membase + (reg->offset << p->regshift));
	else if (reg->size == 16)
		return ioread16(p->membase + (reg->offset << p->regshift));
	else
		WARN(1, "Invalid register access\n");

	return 0;
}

static void sci_serial_out(struct uart_port *p, int offset, int value)
{
	const struct plat_sci_reg *reg = sci_getreg(p, offset);

	if (reg->size == 8)
		iowrite8(value, p->membase + (reg->offset << p->regshift));
	else if (reg->size == 16)
		iowrite16(value, p->membase + (reg->offset << p->regshift));
	else
		WARN(1, "Invalid register access\n");
}
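
/*
 * Worked example for the accessors above: the plain SCI register block is
 * byte-packed (offsets 0x00..0x05 in sci_port_params), so on a port
 * configured with regshift == 1 a read of SCSCR lands at
 * membase + (0x02 << 1) == membase + 0x04.
 */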

static void sci_port_enable(struct sci_port *sci_port)
{
	unsigned int i;

	if (!sci_port->port.dev)
		return;

	pm_runtime_get_sync(sci_port->port.dev);

	for (i = 0; i < SCI_NUM_CLKS; i++) {
		clk_prepare_enable(sci_port->clks[i]);
		sci_port->clk_rates[i] = clk_get_rate(sci_port->clks[i]);
	}
	sci_port->port.uartclk = sci_port->clk_rates[SCI_FCK];
}

static void sci_port_disable(struct sci_port *sci_port)
{
	unsigned int i;

	if (!sci_port->port.dev)
		return;

	for (i = SCI_NUM_CLKS; i-- > 0; )
		clk_disable_unprepare(sci_port->clks[i]);

	pm_runtime_put_sync(sci_port->port.dev);
}

static inline unsigned long port_rx_irq_mask(struct uart_port *port)
{
	/*
	 * Not all ports (such as SCIFA) will support REIE. Rather than
	 * special-casing the port type, we check the port initialization
	 * IRQ enable mask to see whether the IRQ is desired at all. If
	 * it's unset, it's logically inferred that there's no point in
	 * testing for it.
	 */
	return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}

static void sci_start_tx(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned short ctrl;

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 new, scr = serial_port_in(port, SCSCR);
		if (s->chan_tx)
			new = scr | SCSCR_TDRQE;
		else
			new = scr & ~SCSCR_TDRQE;
		if (new != scr)
			serial_port_out(port, SCSCR, new);
	}

	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
	    dma_submit_error(s->cookie_tx)) {
		s->cookie_tx = 0;
		schedule_work(&s->work_tx);
	}
#endif

	if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
		ctrl = serial_port_in(port, SCSCR);
		serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
	}
}
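
/*
 * Note on the logic above: for SCIFA/SCIFB ports SCSCR_TDRQE is toggled to
 * match whether a TX DMA channel is present, yet SCSCR_TIE is still set
 * for those port types even when DMA is in use; other port types only set
 * TIE when no DMA channel was acquired.
 */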

static void sci_stop_tx(struct uart_port *port)
{
	unsigned short ctrl;

	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
	ctrl = serial_port_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~SCSCR_TDRQE;

	ctrl &= ~SCSCR_TIE;

	serial_port_out(port, SCSCR, ctrl);
}

static void sci_start_rx(struct uart_port *port)
{
	unsigned short ctrl;

	ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~SCSCR_RDRQE;

	serial_port_out(port, SCSCR, ctrl);
}

static void sci_stop_rx(struct uart_port *port)
{
	unsigned short ctrl;

	ctrl = serial_port_in(port, SCSCR);

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
		ctrl &= ~SCSCR_RDRQE;

	ctrl &= ~port_rx_irq_mask(port);

	serial_port_out(port, SCSCR, ctrl);
}

static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
{
	if (port->type == PORT_SCI) {
		/* Just store the mask */
		serial_port_out(port, SCxSR, mask);
	} else if (to_sci_port(port)->params->overrun_mask == SCIFA_ORER) {
		/* SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 */
		/* Only clear the status bits we want to clear */
		serial_port_out(port, SCxSR,
				serial_port_in(port, SCxSR) & mask);
	} else {
		/* Store the mask, clear parity/framing errors */
		serial_port_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC));
	}
}

#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
	defined(CONFIG_SERIAL_SH_SCI_EARLYCON)

#ifdef CONFIG_CONSOLE_POLL
static int sci_poll_get_char(struct uart_port *port)
{
	unsigned short status;
	int c;

	do {
		status = serial_port_in(port, SCxSR);
		if (status & SCxSR_ERRORS(port)) {
			sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
			continue;
		}
		break;
	} while (1);

	if (!(status & SCxSR_RDxF(port)))
		return NO_POLL_CHAR;

	c = serial_port_in(port, SCxRDR);

	/* Dummy read */
	serial_port_in(port, SCxSR);
	sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));

	return c;
}
#endif

static void sci_poll_put_char(struct uart_port *port, unsigned char c)
{
	unsigned short status;

	do {
		status = serial_port_in(port, SCxSR);
	} while (!(status & SCxSR_TDxE(port)));

	serial_port_out(port, SCxTDR, c);
	sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
	  CONFIG_SERIAL_SH_SCI_EARLYCON */

static void sci_init_pins(struct uart_port *port, unsigned int cflag)
{
	struct sci_port *s = to_sci_port(port);

	/*
	 * Use port-specific handler if provided.
	 */
	if (s->cfg->ops && s->cfg->ops->init_pins) {
		s->cfg->ops->init_pins(port, cflag);
		return;
	}

	if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
		u16 data = serial_port_in(port, SCPDR);
		u16 ctrl = serial_port_in(port, SCPCR);

		/* Enable RXD and TXD pin functions */
		ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
		if (to_sci_port(port)->has_rtscts) {
			/* RTS# is output, active low, unless autorts */
			if (!(port->mctrl & TIOCM_RTS)) {
				ctrl |= SCPCR_RTSC;
				data |= SCPDR_RTSD;
			} else if (!s->autorts) {
				ctrl |= SCPCR_RTSC;
				data &= ~SCPDR_RTSD;
			} else {
				/* Enable RTS# pin function */
				ctrl &= ~SCPCR_RTSC;
			}
			/* Enable CTS# pin function */
			ctrl &= ~SCPCR_CTSC;
		}
		serial_port_out(port, SCPDR, data);
		serial_port_out(port, SCPCR, ctrl);
	} else if (sci_getreg(port, SCSPTR)->size) {
		u16 status = serial_port_in(port, SCSPTR);

		/* RTS# is always output; and active low, unless autorts */
		status |= SCSPTR_RTSIO;
		if (!(port->mctrl & TIOCM_RTS))
			status |= SCSPTR_RTSDT;
		else if (!s->autorts)
			status &= ~SCSPTR_RTSDT;
		/* CTS# and SCK are inputs */
		status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
		serial_port_out(port, SCSPTR, status);
	}
}

static int sci_txfill(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned int fifo_mask = (s->params->fifosize << 1) - 1;
	const struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCTFDR);
	if (reg->size)
		return serial_port_in(port, SCTFDR) & fifo_mask;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return serial_port_in(port, SCFDR) >> 8;

	return !(serial_port_in(port, SCxSR) & SCI_TDRE);
}
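
/*
 * The fifo_mask used above (and again in sci_rxfill() below) is
 * (fifosize << 1) - 1: with the power-of-two FIFO sizes listed in
 * sci_port_params this is one bit wider than fifosize - 1, so a
 * completely full FIFO (count == fifosize) is still representable.
 */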

static int sci_txroom(struct uart_port *port)
{
	return port->fifosize - sci_txfill(port);
}

static int sci_rxfill(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);
	unsigned int fifo_mask = (s->params->fifosize << 1) - 1;
	const struct plat_sci_reg *reg;

	reg = sci_getreg(port, SCRFDR);
	if (reg->size)
		return serial_port_in(port, SCRFDR) & fifo_mask;

	reg = sci_getreg(port, SCFDR);
	if (reg->size)
		return serial_port_in(port, SCFDR) & fifo_mask;

	return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}

/* ********************************************************************** *
 *                   the interrupt related routines                       *
 * ********************************************************************** */

static void sci_transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	unsigned int stopped = uart_tx_stopped(port);
	unsigned short status;
	unsigned short ctrl;
	int count;

	status = serial_port_in(port, SCxSR);
	if (!(status & SCxSR_TDxE(port))) {
		ctrl = serial_port_in(port, SCSCR);
		if (uart_circ_empty(xmit))
			ctrl &= ~SCSCR_TIE;
		else
			ctrl |= SCSCR_TIE;
		serial_port_out(port, SCSCR, ctrl);
		return;
	}

	count = sci_txroom(port);

	do {
		unsigned char c;

		if (port->x_char) {
			c = port->x_char;
			port->x_char = 0;
		} else if (!uart_circ_empty(xmit) && !stopped) {
			c = xmit->buf[xmit->tail];
			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		} else {
			break;
		}

		serial_port_out(port, SCxTDR, c);

		port->icount.tx++;
	} while (--count > 0);

	sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (uart_circ_empty(xmit)) {
		sci_stop_tx(port);
	} else {
		ctrl = serial_port_in(port, SCSCR);

		if (port->type != PORT_SCI) {
			serial_port_in(port, SCxSR); /* Dummy read */
			sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
		}

		ctrl |= SCSCR_TIE;
		serial_port_out(port, SCSCR, ctrl);
	}
}

/* On SH3, SCIF may read end-of-break as a space->mark char */
#define STEPFN(c)  ({int __c = (c); (((__c-1)|(__c)) == -1); })

static void sci_receive_chars(struct uart_port *port)
{
	struct tty_port *tport = &port->state->port;
	int i, count, copied = 0;
	unsigned short status;
	unsigned char flag;

	status = serial_port_in(port, SCxSR);
	if (!(status & SCxSR_RDxF(port)))
		return;

	while (1) {
		/* Don't copy more bytes than there is room for in the buffer */
		count = tty_buffer_request_room(tport, sci_rxfill(port));

		/* If for any reason we can't copy more data, we're done! */
		if (count == 0)
			break;

		if (port->type == PORT_SCI) {
			char c = serial_port_in(port, SCxRDR);
			if (uart_handle_sysrq_char(port, c))
				count = 0;
			else
				tty_insert_flip_char(tport, c, TTY_NORMAL);
		} else {
			for (i = 0; i < count; i++) {
				char c = serial_port_in(port, SCxRDR);

				status = serial_port_in(port, SCxSR);
				if (uart_handle_sysrq_char(port, c)) {
					count--; i--;
					continue;
				}

				/* Store data and status */
				if (status & SCxSR_FER(port)) {
					flag = TTY_FRAME;
					port->icount.frame++;
					dev_notice(port->dev, "frame error\n");
				} else if (status & SCxSR_PER(port)) {
flag = TTY_PARITY;
|
2011-11-24 10:15:06 +00:00
|
|
|
port->icount.parity++;
|
2008-12-16 09:55:26 +00:00
|
|
|
dev_notice(port->dev, "parity error\n");
|
2006-01-10 04:54:13 +00:00
|
|
|
} else
|
|
|
|
flag = TTY_NORMAL;
|
2008-12-16 09:55:26 +00:00
|
|
|
|
2013-01-03 14:53:03 +00:00
|
|
|
tty_insert_flip_char(tport, c, flag);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-30 10:50:15 +00:00
|
|
|
serial_port_in(port, SCxSR); /* dummy read */
|
2015-08-21 18:02:25 +00:00
|
|
|
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
copied += count;
|
|
|
|
port->icount.rx += count;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (copied) {
|
|
|
|
/* Tell the rest of the system the news. New characters! */
|
2013-01-03 14:53:06 +00:00
|
|
|
tty_flip_buffer_push(tport);
|
2005-04-16 22:20:36 +00:00
|
|
|
} else {
|
2012-03-30 10:50:15 +00:00
|
|
|
serial_port_in(port, SCxSR); /* dummy read */
|
2015-08-21 18:02:25 +00:00
|
|
|
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-20 14:26:18 +00:00
|
|
|
static int sci_handle_errors(struct uart_port *port)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int copied = 0;
|
2012-03-30 10:50:15 +00:00
|
|
|
unsigned short status = serial_port_in(port, SCxSR);
|
2013-01-03 14:53:03 +00:00
|
|
|
struct tty_port *tport = &port->state->port;
|
2011-06-08 09:19:37 +00:00
|
|
|
struct sci_port *s = to_sci_port(port);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-12-06 09:59:17 +00:00
|
|
|
/* Handle overruns */
|
2017-01-11 14:43:36 +00:00
|
|
|
if (status & s->params->overrun_mask) {
|
2013-12-06 09:59:17 +00:00
|
|
|
port->icount.overrun++;
|
2011-11-24 10:15:06 +00:00
|
|
|
|
2013-12-06 09:59:17 +00:00
|
|
|
/* overrun error */
|
|
|
|
if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
|
|
|
|
copied++;
|
2008-12-16 09:55:26 +00:00
|
|
|
|
2014-03-11 17:10:46 +00:00
|
|
|
dev_notice(port->dev, "overrun error\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-09-27 07:32:13 +00:00
|
|
|
if (status & SCxSR_FER(port)) {
|
2017-01-11 14:43:38 +00:00
|
|
|
/* frame error */
|
|
|
|
port->icount.frame++;
|
2011-11-24 10:15:06 +00:00
|
|
|
|
2017-01-11 14:43:38 +00:00
|
|
|
if (tty_insert_flip_char(tport, 0, TTY_FRAME))
|
|
|
|
copied++;
|
2008-12-16 09:55:26 +00:00
|
|
|
|
2017-01-11 14:43:38 +00:00
|
|
|
dev_notice(port->dev, "frame error\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-09-27 07:32:13 +00:00
|
|
|
if (status & SCxSR_PER(port)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
/* parity error */
|
2011-11-24 10:15:06 +00:00
|
|
|
port->icount.parity++;
|
|
|
|
|
2013-01-03 14:53:03 +00:00
|
|
|
if (tty_insert_flip_char(tport, 0, TTY_PARITY))
|
2006-09-27 07:32:13 +00:00
|
|
|
copied++;
|
2008-12-16 09:55:26 +00:00
|
|
|
|
2014-03-11 17:10:46 +00:00
|
|
|
dev_notice(port->dev, "parity error\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-01-10 04:54:13 +00:00
|
|
|
if (copied)
|
2013-01-03 14:53:06 +00:00
|
|
|
tty_flip_buffer_push(tport);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
return copied;
|
|
|
|
}
|
|
|
|
|
2011-01-20 14:26:18 +00:00
|
|
|
static int sci_handle_fifo_overrun(struct uart_port *port)
|
2008-12-16 10:29:38 +00:00
|
|
|
{
|
2013-01-03 14:53:03 +00:00
|
|
|
struct tty_port *tport = &port->state->port;
|
2011-06-08 09:19:37 +00:00
|
|
|
struct sci_port *s = to_sci_port(port);
|
2015-08-21 18:02:33 +00:00
|
|
|
const struct plat_sci_reg *reg;
|
2015-04-30 16:21:32 +00:00
|
|
|
int copied = 0;
|
2015-04-30 16:21:31 +00:00
|
|
|
u16 status;
|
2008-12-16 10:29:38 +00:00
|
|
|
|
2017-01-11 14:43:36 +00:00
|
|
|
reg = sci_getreg(port, s->params->overrun_reg);
|
2011-06-14 08:53:34 +00:00
|
|
|
if (!reg->size)
|
2008-12-16 10:29:38 +00:00
|
|
|
return 0;
|
|
|
|
|
2017-01-11 14:43:36 +00:00
|
|
|
status = serial_port_in(port, s->params->overrun_reg);
|
|
|
|
if (status & s->params->overrun_mask) {
|
|
|
|
status &= ~s->params->overrun_mask;
|
|
|
|
serial_port_out(port, s->params->overrun_reg, status);
|
2008-12-16 10:29:38 +00:00
|
|
|
|
2011-11-24 10:15:06 +00:00
|
|
|
port->icount.overrun++;
|
|
|
|
|
2013-01-03 14:53:03 +00:00
|
|
|
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
|
2013-01-03 14:53:06 +00:00
|
|
|
tty_flip_buffer_push(tport);
|
2008-12-16 10:29:38 +00:00
|
|
|
|
2015-01-26 11:53:29 +00:00
|
|
|
dev_dbg(port->dev, "overrun error\n");
|
2008-12-16 10:29:38 +00:00
|
|
|
copied++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return copied;
|
|
|
|
}
|
|
|
|
|
2011-01-20 14:26:18 +00:00
|
|
|
static int sci_handle_breaks(struct uart_port *port)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int copied = 0;
|
2012-03-30 10:50:15 +00:00
|
|
|
unsigned short status = serial_port_in(port, SCxSR);
|
2013-01-03 14:53:03 +00:00
|
|
|
struct tty_port *tport = &port->state->port;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-03-14 04:22:37 +00:00
|
|
|
if (uart_handle_break(port))
|
|
|
|
return 0;
|
|
|
|
|
2017-01-11 14:43:38 +00:00
|
|
|
if (status & SCxSR_BRK(port)) {
|
2011-11-24 10:15:06 +00:00
|
|
|
port->icount.brk++;
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* Notify of BREAK */
|
2013-01-03 14:53:03 +00:00
|
|
|
if (tty_insert_flip_char(tport, 0, TTY_BREAK))
|
2006-01-10 04:54:13 +00:00
|
|
|
copied++;
|
2008-12-16 09:55:26 +00:00
|
|
|
|
|
|
|
dev_dbg(port->dev, "BREAK detected\n");
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2006-01-10 04:54:13 +00:00
|
|
|
if (copied)
|
2013-01-03 14:53:06 +00:00
|
|
|
tty_flip_buffer_push(tport);
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2008-12-16 10:29:38 +00:00
|
|
|
copied += sci_handle_fifo_overrun(port);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return copied;
|
|
|
|
}
|
|
|
|
|
2017-02-02 17:10:16 +00:00
|
|
|
static int scif_set_rtrg(struct uart_port *port, int rx_trig)
|
|
|
|
{
|
|
|
|
unsigned int bits;
|
|
|
|
|
|
|
|
if (rx_trig < 1)
|
|
|
|
rx_trig = 1;
|
|
|
|
if (rx_trig >= port->fifosize)
|
|
|
|
rx_trig = port->fifosize;
|
|
|
|
|
|
|
|
/* HSCIF can be set to an arbitrary level. */
|
|
|
|
if (sci_getreg(port, HSRTRGR)->size) {
|
|
|
|
serial_port_out(port, HSRTRGR, rx_trig);
|
|
|
|
return rx_trig;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (port->type) {
|
|
|
|
case PORT_SCIF:
|
|
|
|
if (rx_trig < 4) {
|
|
|
|
bits = 0;
|
|
|
|
rx_trig = 1;
|
|
|
|
} else if (rx_trig < 8) {
|
|
|
|
bits = SCFCR_RTRG0;
|
|
|
|
rx_trig = 4;
|
|
|
|
} else if (rx_trig < 14) {
|
|
|
|
bits = SCFCR_RTRG1;
|
|
|
|
rx_trig = 8;
|
|
|
|
} else {
|
|
|
|
bits = SCFCR_RTRG0 | SCFCR_RTRG1;
|
|
|
|
rx_trig = 14;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case PORT_SCIFA:
|
|
|
|
case PORT_SCIFB:
|
|
|
|
if (rx_trig < 16) {
|
|
|
|
bits = 0;
|
|
|
|
rx_trig = 1;
|
|
|
|
} else if (rx_trig < 32) {
|
|
|
|
bits = SCFCR_RTRG0;
|
|
|
|
rx_trig = 16;
|
|
|
|
} else if (rx_trig < 48) {
|
|
|
|
bits = SCFCR_RTRG1;
|
|
|
|
rx_trig = 32;
|
|
|
|
} else {
|
|
|
|
bits = SCFCR_RTRG0 | SCFCR_RTRG1;
|
|
|
|
rx_trig = 48;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
WARN(1, "unknown FIFO configuration");
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
serial_port_out(port, SCFCR,
|
|
|
|
(serial_port_in(port, SCFCR) &
|
|
|
|
~(SCFCR_RTRG1 | SCFCR_RTRG0)) | bits);
|
|
|
|
|
|
|
|
return rx_trig;
|
|
|
|
}
|
|
|
|
|
2017-02-03 10:38:18 +00:00
|
|
|
static int scif_rtrg_enabled(struct uart_port *port)
|
|
|
|
{
|
|
|
|
if (sci_getreg(port, HSRTRGR)->size)
|
|
|
|
return serial_port_in(port, HSRTRGR) != 0;
|
|
|
|
else
|
|
|
|
return (serial_port_in(port, SCFCR) &
|
|
|
|
(SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
|
|
|
|
}
|
|
|
|
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
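As a concrete illustration of the pattern, a minimal sketch with a
hypothetical structure (my_dev, poll_timer, my_poll and my_dev_init are
invented for illustration, not code from this driver; the driver's own
converted callback, rx_fifo_timer_fn(), follows below):

#include <linux/timer.h>

struct my_dev {
	struct timer_list poll_timer;
	int pending;
};

/* New-style callback: the timer pointer itself identifies the owner. */
static void my_poll(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, poll_timer);

	dev->pending = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	/* Was: setup_timer(&dev->poll_timer, my_poll, (unsigned long)dev); */
	timer_setup(&dev->poll_timer, my_poll, 0);
}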
2017-10-16 21:43:17 +00:00
|
|
|
static void rx_fifo_timer_fn(struct timer_list *t)
|
2017-02-03 10:38:18 +00:00
|
|
|
{
|
2017-10-16 21:43:17 +00:00
|
|
|
struct sci_port *s = from_timer(s, t, rx_fifo_timer);
|
2017-02-03 10:38:18 +00:00
|
|
|
struct uart_port *port = &s->port;
|
|
|
|
|
|
|
|
dev_dbg(port->dev, "Rx timed out\n");
|
|
|
|
scif_set_rtrg(port, 1);
|
|
|
|
}
|
|
|
|
|
2017-02-03 10:38:19 +00:00
|
|
|
static ssize_t rx_trigger_show(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct uart_port *port = dev_get_drvdata(dev);
|
|
|
|
struct sci_port *sci = to_sci_port(port);
|
|
|
|
|
|
|
|
return sprintf(buf, "%d\n", sci->rx_trigger);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t rx_trigger_store(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct uart_port *port = dev_get_drvdata(dev);
|
|
|
|
struct sci_port *sci = to_sci_port(port);
|
2017-07-17 08:34:23 +00:00
|
|
|
int ret;
|
2017-02-03 10:38:19 +00:00
|
|
|
long r;
|
|
|
|
|
2017-07-17 08:34:23 +00:00
|
|
|
ret = kstrtol(buf, 0, &r);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-02-08 17:31:14 +00:00
|
|
|
|
2017-02-03 10:38:19 +00:00
|
|
|
sci->rx_trigger = scif_set_rtrg(port, r);
|
2017-02-08 17:31:14 +00:00
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
|
|
|
|
scif_set_rtrg(port, 1);
|
|
|
|
|
2017-02-03 10:38:19 +00:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static DEVICE_ATTR(rx_fifo_trigger, 0644, rx_trigger_show, rx_trigger_store);
|
|
|
|
|
|
|
|
static ssize_t rx_fifo_timeout_show(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct uart_port *port = dev_get_drvdata(dev);
|
|
|
|
struct sci_port *sci = to_sci_port(port);
|
2017-09-29 13:08:53 +00:00
|
|
|
int v;
|
2017-02-03 10:38:19 +00:00
|
|
|
|
2017-09-29 13:08:53 +00:00
|
|
|
if (port->type == PORT_HSCIF)
|
|
|
|
v = sci->hscif_tot >> HSSCR_TOT_SHIFT;
|
|
|
|
else
|
|
|
|
v = sci->rx_fifo_timeout;
|
|
|
|
|
|
|
|
return sprintf(buf, "%d\n", v);
|
2017-02-03 10:38:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t rx_fifo_timeout_store(struct device *dev,
|
|
|
|
struct device_attribute *attr,
|
|
|
|
const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct uart_port *port = dev_get_drvdata(dev);
|
|
|
|
struct sci_port *sci = to_sci_port(port);
|
2017-07-17 08:34:23 +00:00
|
|
|
int ret;
|
2017-02-03 10:38:19 +00:00
|
|
|
long r;
|
|
|
|
|
2017-07-17 08:34:23 +00:00
|
|
|
ret = kstrtol(buf, 0, &r);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2017-09-29 13:08:53 +00:00
|
|
|
|
|
|
|
if (port->type == PORT_HSCIF) {
|
|
|
|
if (r < 0 || r > 3)
|
|
|
|
return -EINVAL;
|
|
|
|
sci->hscif_tot = r << HSSCR_TOT_SHIFT;
|
|
|
|
} else {
|
|
|
|
sci->rx_fifo_timeout = r;
|
|
|
|
scif_set_rtrg(port, 1);
|
|
|
|
if (r > 0)
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-16 21:43:17 +00:00
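For this driver, the conversion means the Rx FIFO timeout callback now receives the timer pointer itself and recovers its sci_port via from_timer(). A minimal sketch of the converted callback shape, assuming struct sci_port embeds the rx_fifo_timer member that the timer_setup() call just below registers; the function name and body here are illustrative only, not the driver's actual handler:
static void example_rx_fifo_timer_fn(struct timer_list *t)
{
	/* Recover the containing sci_port from the embedded timer. */
	struct sci_port *s = from_timer(s, t, rx_fifo_timer);

	/* Illustrative body: just note that the Rx FIFO timeout fired. */
	dev_dbg(s->port.dev, "Rx FIFO timeout\n");
}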
|
|
|
timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
|
2017-09-29 13:08:53 +00:00
|
|
|
}
|
|
|
|
|
2017-02-03 10:38:19 +00:00
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
static DEVICE_ATTR(rx_fifo_timeout, 0644, rx_fifo_timeout_show, rx_fifo_timeout_store);
|
|
|
|
|
|
|
|
|
2010-03-02 02:39:15 +00:00
|
|
|
#ifdef CONFIG_SERIAL_SH_SCI_DMA
|
2015-09-18 11:08:24 +00:00
|
|
|
static void sci_dma_tx_complete(void *arg)
|
|
|
|
{
|
|
|
|
struct sci_port *s = arg;
|
|
|
|
struct uart_port *port = &s->port;
|
|
|
|
struct circ_buf *xmit = &port->state->xmit;
|
|
|
|
unsigned long flags;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
xmit->tail += s->tx_dma_len;
|
|
|
|
xmit->tail &= UART_XMIT_SIZE - 1;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
port->icount.tx += s->tx_dma_len;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
|
|
|
|
uart_write_wakeup(port);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
if (!uart_circ_empty(xmit)) {
|
|
|
|
s->cookie_tx = 0;
|
|
|
|
schedule_work(&s->work_tx);
|
|
|
|
} else {
|
|
|
|
s->cookie_tx = -EINVAL;
|
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
|
|
|
|
u16 ctrl = serial_port_in(port, SCSCR);
|
|
|
|
serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
|
|
|
|
}
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-07-29 14:01:24 +00:00
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/* Locking: called with port lock held */
|
|
|
|
static int sci_dma_rx_push(struct sci_port *s, void *buf, size_t count)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
struct uart_port *port = &s->port;
|
|
|
|
struct tty_port *tport = &port->state->port;
|
|
|
|
int copied;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
copied = tty_insert_flip_string(tport, buf, count);
|
2016-11-07 15:56:50 +00:00
|
|
|
if (copied < count)
|
2015-09-18 11:08:24 +00:00
|
|
|
port->icount.buf_overrun++;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
port->icount.rx += copied;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
return copied;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static int sci_dma_rx_find_active(struct sci_port *s)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
unsigned int i;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(s->cookie_rx); i++)
|
|
|
|
if (s->active_rx == s->cookie_rx[i])
|
|
|
|
return i;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
return -1;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
|
2011-01-13 06:06:28 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
struct dma_chan *chan = s->chan_rx;
|
|
|
|
struct uart_port *port = &s->port;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
|
|
|
s->chan_rx = NULL;
|
|
|
|
s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
|
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
|
|
|
dmaengine_terminate_all(chan);
|
|
|
|
dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
|
|
|
|
sg_dma_address(&s->sg_rx[0]));
|
|
|
|
dma_release_channel(chan);
|
2017-11-02 10:14:55 +00:00
|
|
|
if (enable_pio) {
|
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
2015-09-18 11:08:24 +00:00
|
|
|
sci_start_rx(port);
|
2017-11-02 10:14:55 +00:00
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
|
|
|
}
|
2011-01-13 06:06:28 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static void sci_dma_rx_complete(void *arg)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
struct sci_port *s = arg;
|
2015-09-18 11:08:30 +00:00
|
|
|
struct dma_chan *chan = s->chan_rx;
|
2015-09-18 11:08:24 +00:00
|
|
|
struct uart_port *port = &s->port;
|
2015-09-18 11:08:25 +00:00
|
|
|
struct dma_async_tx_descriptor *desc;
|
2015-09-18 11:08:24 +00:00
|
|
|
unsigned long flags;
|
|
|
|
int active, count = 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
dev_dbg(port->dev, "%s(%d) active cookie %d\n", __func__, port->line,
|
|
|
|
s->active_rx);
|
2015-03-16 16:19:19 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
active = sci_dma_rx_find_active(s);
|
|
|
|
if (active >= 0)
|
|
|
|
count = sci_dma_rx_push(s, s->rx_buf[active], s->buf_len_rx);
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
if (count)
|
|
|
|
tty_flip_buffer_push(&port->state->port);
|
2015-01-26 12:25:48 +00:00
|
|
|
|
2015-09-18 11:08:25 +00:00
|
|
|
desc = dmaengine_prep_slave_sg(s->chan_rx, &s->sg_rx[active], 1,
|
|
|
|
DMA_DEV_TO_MEM,
|
|
|
|
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
|
|
|
if (!desc)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
desc->callback = sci_dma_rx_complete;
|
|
|
|
desc->callback_param = s;
|
|
|
|
s->cookie_rx[active] = dmaengine_submit(desc);
|
|
|
|
if (dma_submit_error(s->cookie_rx[active]))
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
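/* The just-completed buffer was resubmitted above; the other buffer's transfer is now the one to track. */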
s->active_rx = s->cookie_rx[!active];
|
|
|
|
|
2015-09-18 11:08:30 +00:00
|
|
|
dma_async_issue_pending(chan);
|
|
|
|
|
2016-11-07 15:56:50 +00:00
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
2015-09-18 11:08:25 +00:00
|
|
|
dev_dbg(port->dev, "%s: cookie %d #%d, new active cookie %d\n",
|
|
|
|
__func__, s->cookie_rx[active], active, s->active_rx);
|
|
|
|
return;
|
|
|
|
|
|
|
|
fail:
|
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
|
|
|
dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
|
|
|
|
sci_rx_dma_release(s, true);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
struct dma_chan *chan = s->chan_tx;
|
|
|
|
struct uart_port *port = &s->port;
|
2009-01-21 15:13:42 +00:00
|
|
|
unsigned long flags;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
|
|
|
s->chan_tx = NULL;
|
|
|
|
s->cookie_tx = -EINVAL;
|
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
|
|
|
dmaengine_terminate_all(chan);
|
|
|
|
dma_unmap_single(chan->device->dev, s->tx_dma_addr, UART_XMIT_SIZE,
|
|
|
|
DMA_TO_DEVICE);
|
|
|
|
dma_release_channel(chan);
|
2017-11-02 10:14:55 +00:00
|
|
|
if (enable_pio) {
|
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
2015-09-18 11:08:24 +00:00
|
|
|
sci_start_tx(port);
|
2017-11-02 10:14:55 +00:00
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
|
|
|
}
|
2015-09-18 11:08:24 +00:00
|
|
|
}
|
2011-01-19 08:19:35 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static void sci_submit_rx(struct sci_port *s)
|
|
|
|
{
|
|
|
|
struct dma_chan *chan = s->chan_rx;
|
|
|
|
int i;
|
2011-01-19 08:30:53 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
for (i = 0; i < 2; i++) {
|
|
|
|
struct scatterlist *sg = &s->sg_rx[i];
|
|
|
|
struct dma_async_tx_descriptor *desc;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
desc = dmaengine_prep_slave_sg(chan,
|
|
|
|
sg, 1, DMA_DEV_TO_MEM,
|
|
|
|
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
|
|
|
if (!desc)
|
|
|
|
goto fail;
|
2009-01-21 15:14:30 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
desc->callback = sci_dma_rx_complete;
|
|
|
|
desc->callback_param = s;
|
|
|
|
s->cookie_rx[i] = dmaengine_submit(desc);
|
|
|
|
if (dma_submit_error(s->cookie_rx[i]))
|
|
|
|
goto fail;
|
2011-06-28 06:25:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
}
|
2011-06-28 06:25:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
s->active_rx = s->cookie_rx[0];
|
2011-06-28 06:25:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
dma_async_issue_pending(chan);
|
|
|
|
return;
|
2011-06-28 06:25:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
fail:
|
|
|
|
if (i)
|
|
|
|
dmaengine_terminate_all(chan);
|
|
|
|
for (i = 0; i < 2; i++)
|
|
|
|
s->cookie_rx[i] = -EINVAL;
|
|
|
|
s->active_rx = -EINVAL;
|
|
|
|
sci_rx_dma_release(s, true);
|
|
|
|
}
|
2011-06-28 06:25:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static void work_fn_tx(struct work_struct *work)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
struct sci_port *s = container_of(work, struct sci_port, work_tx);
|
|
|
|
struct dma_async_tx_descriptor *desc;
|
|
|
|
struct dma_chan *chan = s->chan_tx;
|
|
|
|
struct uart_port *port = &s->port;
|
|
|
|
struct circ_buf *xmit = &port->state->xmit;
|
|
|
|
dma_addr_t buf;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-06-28 06:25:36 +00:00
|
|
|
/*
|
2015-09-18 11:08:24 +00:00
|
|
|
* DMA is idle now.
|
|
|
|
* Port xmit buffer is already mapped, and it is one page... Just adjust
|
|
|
|
* offsets and lengths. Since it is a circular buffer, we have to
|
|
|
|
* transmit until the end, and then the rest. Take the port lock to get a
|
|
|
|
* consistent xmit buffer state.
|
2011-06-28 06:25:36 +00:00
|
|
|
*/
|
2015-09-18 11:08:24 +00:00
|
|
|
spin_lock_irq(&port->lock);
|
|
|
|
buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
|
|
|
|
s->tx_dma_len = min_t(unsigned int,
|
|
|
|
CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
|
|
|
|
CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
|
|
|
|
spin_unlock_irq(&port->lock);
|
2012-05-18 09:21:06 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
|
|
|
|
DMA_MEM_TO_DEV,
|
|
|
|
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
|
|
|
if (!desc) {
|
|
|
|
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
|
|
|
|
/* switch to PIO */
|
|
|
|
sci_tx_dma_release(s, true);
|
|
|
|
return;
|
|
|
|
}
|
2012-05-18 09:21:06 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
|
|
|
|
DMA_TO_DEVICE);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
spin_lock_irq(&port->lock);
|
|
|
|
desc->callback = sci_dma_tx_complete;
|
|
|
|
desc->callback_param = s;
|
|
|
|
spin_unlock_irq(&port->lock);
|
|
|
|
s->cookie_tx = dmaengine_submit(desc);
|
|
|
|
if (dma_submit_error(s->cookie_tx)) {
|
|
|
|
dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
|
|
|
|
/* switch to PIO */
|
|
|
|
sci_tx_dma_release(s, true);
|
|
|
|
return;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
|
|
|
|
__func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
dma_async_issue_pending(chan);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
treewide: setup_timer() -> timer_setup()
2017-10-16 21:43:17 +00:00
|
|
|
static void rx_timer_fn(struct timer_list *t)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
treewide: setup_timer() -> timer_setup()
2017-10-16 21:43:17 +00:00
|
|
|
struct sci_port *s = from_timer(s, t, rx_timer);
|
2015-09-18 11:08:32 +00:00
|
|
|
struct dma_chan *chan = s->chan_rx;
|
2015-09-18 11:08:24 +00:00
|
|
|
struct uart_port *port = &s->port;
|
2015-09-18 11:08:25 +00:00
|
|
|
struct dma_tx_state state;
|
|
|
|
enum dma_status status;
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned int read;
|
|
|
|
int active, count;
|
|
|
|
u16 scr;
|
|
|
|
|
|
|
|
dev_dbg(port->dev, "DMA Rx timed out\n");
|
|
|
|
|
2016-11-07 15:56:50 +00:00
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
|
|
|
|
2015-09-18 11:08:25 +00:00
|
|
|
active = sci_dma_rx_find_active(s);
|
|
|
|
if (active < 0) {
|
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
|
2015-09-18 11:08:31 +00:00
|
|
|
if (status == DMA_COMPLETE) {
|
2016-11-07 15:56:50 +00:00
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
2015-09-18 11:08:25 +00:00
|
|
|
dev_dbg(port->dev, "Cookie %d #%d has already completed\n",
|
|
|
|
s->active_rx, active);
|
2015-09-18 11:08:31 +00:00
|
|
|
|
|
|
|
/* Let packet complete handler take care of the packet */
|
|
|
|
return;
|
|
|
|
}
|
2015-09-18 11:08:25 +00:00
|
|
|
|
2015-09-18 11:08:32 +00:00
|
|
|
dmaengine_pause(chan);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Sometimes the DMA transfer doesn't stop even after it is told to stop, and
|
|
|
|
* data keeps coming in until the transaction is complete, so check
|
|
|
|
* for DMA_COMPLETE again.
|
|
|
|
* Let the packet-complete handler take care of the packet.
|
|
|
|
*/
|
|
|
|
status = dmaengine_tx_status(s->chan_rx, s->active_rx, &state);
|
|
|
|
if (status == DMA_COMPLETE) {
|
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
|
|
|
dev_dbg(port->dev, "Transaction complete after DMA engine was stopped");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:25 +00:00
|
|
|
/* Handle incomplete DMA receive */
|
|
|
|
dmaengine_terminate_all(s->chan_rx);
|
|
|
|
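/* Compute how many bytes the DMA engine actually wrote before it was stopped. */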
read = sg_dma_len(&s->sg_rx[active]) - state.residue;
|
|
|
|
|
|
|
|
if (read) {
|
|
|
|
count = sci_dma_rx_push(s, s->rx_buf[active], read);
|
|
|
|
if (count)
|
|
|
|
tty_flip_buffer_push(&port->state->port);
|
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:26 +00:00
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
|
|
|
|
sci_submit_rx(s);
|
2015-09-18 11:08:29 +00:00
|
|
|
|
|
|
|
/* Direct new serial port interrupts back to CPU */
|
|
|
|
scr = serial_port_in(port, SCSCR);
|
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
|
|
|
|
scr &= ~SCSCR_RDRQE;
|
|
|
|
enable_irq(s->irqs[SCIx_RXI_IRQ]);
|
|
|
|
}
|
|
|
|
serial_port_out(port, SCSCR, scr | SCSCR_RIE);
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:33 +00:00
|
|
|
static struct dma_chan *sci_request_dma_chan(struct uart_port *port,
|
2017-01-11 14:43:37 +00:00
|
|
|
enum dma_transfer_direction dir)
|
2015-09-18 11:08:33 +00:00
|
|
|
{
|
|
|
|
struct dma_chan *chan;
|
|
|
|
struct dma_slave_config cfg;
|
|
|
|
int ret;
|
|
|
|
|
2017-01-11 14:43:37 +00:00
|
|
|
chan = dma_request_slave_channel(port->dev,
|
|
|
|
dir == DMA_MEM_TO_DEV ? "tx" : "rx");
|
2015-09-18 11:08:33 +00:00
|
|
|
if (!chan) {
|
2017-05-22 13:15:02 +00:00
|
|
|
dev_warn(port->dev, "dma_request_slave_channel failed\n");
|
2015-09-18 11:08:33 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(&cfg, 0, sizeof(cfg));
|
|
|
|
cfg.direction = dir;
|
|
|
|
if (dir == DMA_MEM_TO_DEV) {
|
|
|
|
cfg.dst_addr = port->mapbase +
|
|
|
|
(sci_getreg(port, SCxTDR)->offset << port->regshift);
|
|
|
|
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
|
|
|
|
} else {
|
|
|
|
cfg.src_addr = port->mapbase +
|
|
|
|
(sci_getreg(port, SCxRDR)->offset << port->regshift);
|
|
|
|
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = dmaengine_slave_config(chan, &cfg);
|
|
|
|
if (ret) {
|
|
|
|
dev_warn(port->dev, "dmaengine_slave_config failed %d\n", ret);
|
|
|
|
dma_release_channel(chan);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return chan;
|
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static void sci_request_dma(struct uart_port *port)
|
2010-03-02 02:39:15 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
struct sci_port *s = to_sci_port(port);
|
|
|
|
struct dma_chan *chan;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2017-01-11 14:43:37 +00:00
|
|
|
if (!port->dev->of_node)
|
2015-09-18 11:08:24 +00:00
|
|
|
return;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
s->cookie_tx = -EINVAL;
|
2017-09-22 18:29:30 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't request a dma channel if no channel was specified
|
|
|
|
* in the device tree.
|
|
|
|
*/
|
|
|
|
if (!of_find_property(port->dev->of_node, "dmas", NULL))
|
|
|
|
return;
|
|
|
|
|
2017-01-11 14:43:37 +00:00
|
|
|
chan = sci_request_dma_chan(port, DMA_MEM_TO_DEV);
|
2015-09-18 11:08:24 +00:00
|
|
|
dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
|
|
|
|
if (chan) {
|
|
|
|
s->chan_tx = chan;
|
|
|
|
/* UART circular tx buffer is an aligned page. */
|
|
|
|
s->tx_dma_addr = dma_map_single(chan->device->dev,
|
|
|
|
port->state->xmit.buf,
|
|
|
|
UART_XMIT_SIZE,
|
|
|
|
DMA_TO_DEVICE);
|
|
|
|
if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
|
|
|
|
dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
|
|
|
|
dma_release_channel(chan);
|
|
|
|
s->chan_tx = NULL;
|
|
|
|
} else {
|
|
|
|
dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
|
|
|
|
__func__, UART_XMIT_SIZE,
|
|
|
|
port->state->xmit.buf, &s->tx_dma_addr);
|
serial: sh-sci: fix a race of DMA submit_tx on transfer
When DMA is enabled, an sh-sci transfer begins with
uart_start()
sci_start_tx()
if (cookie_tx < 0) schedule_work()
Then DMA starts once the workqueue item runs, -- (A)
process_one_work()
work_fn_tx()
cookie_tx = desc->submit_tx()
and finishes when the DMA transfer ends, -- (B)
sci_dma_tx_complete()
async_tx_ack()
cookie_tx = -EINVAL
(possibly another schedule_work())
This A-to-B sequence is not reentrant, since the controlling variables
(for example, cookie_tx above) are not queues or lists. So they
must run as A B A B ..., otherwise the result is a kernel crash.
To ensure the sequence, sci_start_tx() tests whether cookie_tx < 0
(which represents "not used") before calling schedule_work().
But cookie_tx is not set (to a cookie, which also means "used") until
the middle of the workqueue-scheduled function work_fn_tx().
This gap between the test and the set allows the sequence to break
under very frequent calls of uart_start().
Another gap, between async_tx_ack() and another schedule_work(), results
in the same issue.
This patch introduces a new condition, "cookie_tx == 0", to mark
the channel "busy", and assigns it within the spin-locked region to fill the gaps.
Signed-off-by: Takashi Yoshii <takashi.yoshii.zj@renesas.com>
Reviewed-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Cc: stable@vger.kernel.org
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
2012-03-14 07:14:43 +00:00
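The fix described above can be pictured with a short sketch (a simplified illustration of the idea, with a hypothetical helper name, not the driver's exact code): the start path claims the Tx channel by moving cookie_tx from "free" (< 0) to "busy" (0) while holding the port lock, so only one caller can schedule the Tx work; work_fn_tx() later stores the real cookie, and sci_dma_tx_complete() resets it to -EINVAL.
/* Simplified sketch of the cookie_tx "busy" handshake (illustrative only). */
static void example_start_tx_dma(struct sci_port *s)
{
	unsigned long flags;

	spin_lock_irqsave(&s->port.lock, flags);
	if (s->cookie_tx < 0) {
		s->cookie_tx = 0;	/* claim: "busy", no cookie assigned yet */
		schedule_work(&s->work_tx);
	}
	spin_unlock_irqrestore(&s->port.lock, flags);
}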
|
|
|
}
|
2015-09-18 11:08:24 +00:00
|
|
|
|
|
|
|
INIT_WORK(&s->work_tx, work_fn_tx);
|
2010-03-19 13:53:04 +00:00
|
|
|
}
|
|
|
|
|
2017-01-11 14:43:37 +00:00
|
|
|
chan = sci_request_dma_chan(port, DMA_DEV_TO_MEM);
|
2015-09-18 11:08:24 +00:00
|
|
|
dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
|
|
|
|
if (chan) {
|
|
|
|
unsigned int i;
|
|
|
|
dma_addr_t dma;
|
|
|
|
void *buf;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
s->chan_rx = chan;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
s->buf_len_rx = 2 * max_t(size_t, 16, port->fifosize);
|
|
|
|
buf = dma_alloc_coherent(chan->device->dev, s->buf_len_rx * 2,
|
|
|
|
&dma, GFP_KERNEL);
|
|
|
|
if (!buf) {
|
|
|
|
dev_warn(port->dev,
|
|
|
|
"Failed to allocate Rx dma buffer, using PIO\n");
|
|
|
|
dma_release_channel(chan);
|
|
|
|
s->chan_rx = NULL;
|
|
|
|
return;
|
|
|
|
}
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
for (i = 0; i < 2; i++) {
|
|
|
|
struct scatterlist *sg = &s->sg_rx[i];
|
2015-08-21 18:02:51 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
sg_init_table(sg, 1);
|
|
|
|
s->rx_buf[i] = buf;
|
|
|
|
sg_dma_address(sg) = dma;
|
2015-12-04 14:21:19 +00:00
|
|
|
sg_dma_len(sg) = s->buf_len_rx;
|
2015-08-21 18:02:51 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
buf += s->buf_len_rx;
|
|
|
|
dma += s->buf_len_rx;
|
|
|
|
}
|
|
|
|
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback;
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-16 21:43:17 +00:00
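Applied to this driver's rx_timer, the conversion pattern above works out as
follows (an illustrative sketch; the pre-conversion data argument shown here
is assumed, not taken from the old source):
Before:
	static void rx_timer_fn(unsigned long arg)
	{
		struct sci_port *s = (struct sci_port *)arg;
		...
	}
	...
	setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
After:
	static void rx_timer_fn(struct timer_list *t)
	{
		struct sci_port *s = from_timer(s, t, rx_timer);
		...
	}
	...
	timer_setup(&s->rx_timer, rx_timer_fn, 0);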
		timer_setup(&s->rx_timer, rx_timer_fn, 0);

		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
			sci_submit_rx(s);
	}
}

static void sci_free_dma(struct uart_port *port)
{
	struct sci_port *s = to_sci_port(port);

	if (s->chan_tx)
		sci_tx_dma_release(s, false);
	if (s->chan_rx)
		sci_rx_dma_release(s, false);
}

static void sci_flush_buffer(struct uart_port *port)
{
	/*
	 * In uart_flush_buffer(), the xmit circular buffer has just been
	 * cleared, so we have to reset tx_dma_len accordingly.
	 */
	to_sci_port(port)->tx_dma_len = 0;
}
#else /* !CONFIG_SERIAL_SH_SCI_DMA */
static inline void sci_request_dma(struct uart_port *port)
{
}

static inline void sci_free_dma(struct uart_port *port)
{
}

#define sci_flush_buffer	NULL
#endif /* !CONFIG_SERIAL_SH_SCI_DMA */
static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

#ifdef CONFIG_SERIAL_SH_SCI_DMA
	if (s->chan_rx) {
		u16 scr = serial_port_in(port, SCSCR);
		u16 ssr = serial_port_in(port, SCxSR);

		/* Disable future Rx interrupts */
		if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
			disable_irq_nosync(irq);
			scr |= SCSCR_RDRQE;
		} else {
			scr &= ~SCSCR_RIE;
			sci_submit_rx(s);
		}
		serial_port_out(port, SCSCR, scr);
		/* Clear current interrupt */
		serial_port_out(port, SCxSR,
				ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
			jiffies, s->rx_timeout);
		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);

		return IRQ_HANDLED;
	}
#endif

	if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
		if (!scif_rtrg_enabled(port))
			scif_set_rtrg(port, s->rx_trigger);

		mod_timer(&s->rx_fifo_timer, jiffies + DIV_ROUND_UP(
			s->rx_frame * s->rx_fifo_timeout, 1000));
	}

	/* I think sci_receive_chars has to be called irrespective
	 * of whether the I_IXOFF is set, otherwise, how is the interrupt
	 * to be disabled?
	 */
	sci_receive_chars(ptr);

	return IRQ_HANDLED;
}
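/*
 * Summary of the Rx paths above: with DMA active (s->chan_rx set), the
 * handler suppresses further Rx interrupts (RDRQE on SCIFA/SCIFB after
 * disabling the IRQ, otherwise clearing RIE and resubmitting the DMA
 * descriptor), acknowledges the pending RDF/DR flags and arms s->rx_timer
 * as a fallback to flush a partially filled DMA buffer.  Without DMA,
 * reception goes through sci_receive_chars(), optionally restoring the
 * RX FIFO trigger level and arming s->rx_fifo_timer first.
 */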
static irqreturn_t sci_tx_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	sci_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t sci_er_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct sci_port *s = to_sci_port(port);

	/* Handle errors */
	if (port->type == PORT_SCI) {
		if (sci_handle_errors(port)) {
			/* discard character in rx buffer */
			serial_port_in(port, SCxSR);
			sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
		}
	} else {
		sci_handle_fifo_overrun(port);
		if (!s->chan_rx)
			sci_receive_chars(ptr);
	}

	sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));

	/* Kick the transmission */
	if (!s->chan_tx)
		sci_tx_interrupt(irq, ptr);

	return IRQ_HANDLED;
}
static irqreturn_t sci_br_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;

	/* Handle BREAKs */
	sci_handle_breaks(port);
	sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));

	return IRQ_HANDLED;
}
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
|
|
|
|
{
|
|
|
|
unsigned short ssr_status, scr_status, err_enabled, orer_status = 0;
|
|
|
|
struct uart_port *port = ptr;
|
|
|
|
struct sci_port *s = to_sci_port(port);
|
|
|
|
irqreturn_t ret = IRQ_NONE;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
ssr_status = serial_port_in(port, SCxSR);
|
|
|
|
scr_status = serial_port_in(port, SCSCR);
|
2017-01-11 14:43:36 +00:00
|
|
|
if (s->params->overrun_reg == SCxSR)
|
2015-09-18 11:08:24 +00:00
|
|
|
orer_status = ssr_status;
|
2017-01-11 14:43:36 +00:00
|
|
|
else if (sci_getreg(port, s->params->overrun_reg)->size)
|
|
|
|
orer_status = serial_port_in(port, s->params->overrun_reg);
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
err_enabled = scr_status & port_rx_irq_mask(port);
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/* Tx Interrupt */
|
|
|
|
if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCSCR_TIE) &&
|
|
|
|
!s->chan_tx)
|
|
|
|
ret = sci_tx_interrupt(irq, ptr);
|
2015-08-21 18:02:47 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/*
|
|
|
|
* Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
|
|
|
|
* DR flags
|
|
|
|
*/
|
|
|
|
if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
|
|
|
|
(scr_status & SCSCR_RIE))
|
|
|
|
ret = sci_rx_interrupt(irq, ptr);
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/* Error Interrupt */
|
|
|
|
if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
|
|
|
|
ret = sci_er_interrupt(irq, ptr);
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/* Break Interrupt */
|
|
|
|
if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
|
|
|
|
ret = sci_br_interrupt(irq, ptr);
|
|
|
|
|
|
|
|
/* Overrun Interrupt */
|
2017-01-11 14:43:36 +00:00
|
|
|
if (orer_status & s->params->overrun_mask) {
|
2015-09-18 11:08:24 +00:00
|
|
|
sci_handle_fifo_overrun(port);
|
|
|
|
ret = IRQ_HANDLED;
|
2010-03-02 02:39:15 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
return ret;
|
|
|
|
}
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static const struct sci_irq_desc {
|
|
|
|
const char *desc;
|
|
|
|
irq_handler_t handler;
|
|
|
|
} sci_irq_desc[] = {
|
|
|
|
/*
|
|
|
|
* Split out handlers, the default case.
|
|
|
|
*/
|
|
|
|
[SCIx_ERI_IRQ] = {
|
|
|
|
.desc = "rx err",
|
|
|
|
.handler = sci_er_interrupt,
|
|
|
|
},
|
2010-03-19 13:53:04 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
[SCIx_RXI_IRQ] = {
|
|
|
|
.desc = "rx full",
|
|
|
|
.handler = sci_rx_interrupt,
|
|
|
|
},
|
2015-08-21 18:02:48 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
[SCIx_TXI_IRQ] = {
|
|
|
|
.desc = "tx empty",
|
|
|
|
.handler = sci_tx_interrupt,
|
|
|
|
},
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
[SCIx_BRI_IRQ] = {
|
|
|
|
.desc = "break",
|
|
|
|
.handler = sci_br_interrupt,
|
|
|
|
},
|
2010-03-02 02:39:15 +00:00
|
|
|
|
|
|
|
/*
|
2015-09-18 11:08:24 +00:00
|
|
|
* Special muxed handler.
|
2010-03-02 02:39:15 +00:00
|
|
|
*/
|
2015-09-18 11:08:24 +00:00
|
|
|
[SCIx_MUX_IRQ] = {
|
|
|
|
.desc = "mux",
|
|
|
|
.handler = sci_mpxed_interrupt,
|
|
|
|
},
|
|
|
|
};
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static int sci_request_irq(struct sci_port *port)
|
|
|
|
{
|
|
|
|
struct uart_port *up = &port->port;
|
|
|
|
int i, j, ret = 0;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
for (i = j = 0; i < SCIx_NR_IRQS; i++, j++) {
|
|
|
|
const struct sci_irq_desc *desc;
|
|
|
|
int irq;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
if (SCIx_IRQ_IS_MUXED(port)) {
|
|
|
|
i = SCIx_MUX_IRQ;
|
|
|
|
irq = up->irq;
|
|
|
|
} else {
|
|
|
|
irq = port->irqs[i];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Certain port types won't support all of the
|
|
|
|
* available interrupt sources.
|
|
|
|
*/
|
|
|
|
if (unlikely(irq < 0))
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
desc = sci_irq_desc + i;
|
|
|
|
port->irqstr[j] = kasprintf(GFP_KERNEL, "%s:%s",
|
|
|
|
dev_name(up->dev), desc->desc);
|
2016-12-03 10:40:25 +00:00
|
|
|
if (!port->irqstr[j]) {
|
|
|
|
ret = -ENOMEM;
|
2015-09-18 11:08:24 +00:00
|
|
|
goto out_nomem;
|
2016-12-03 10:40:25 +00:00
|
|
|
}
|
2015-09-18 11:08:24 +00:00
|
|
|
|
|
|
|
ret = request_irq(irq, desc->handler, up->irqflags,
|
|
|
|
port->irqstr[j], port);
|
|
|
|
if (unlikely(ret)) {
|
|
|
|
dev_err(up->dev, "Can't allocate %s IRQ\n", desc->desc);
|
|
|
|
goto out_noirq;
|
|
|
|
}
|
2010-03-02 02:39:15 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
out_noirq:
|
|
|
|
while (--i >= 0)
|
|
|
|
free_irq(port->irqs[i], port);
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
out_nomem:
|
|
|
|
while (--j >= 0)
|
|
|
|
kfree(port->irqstr[j]);
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
return ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static void sci_free_irq(struct sci_port *port)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
int i;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/*
|
|
|
|
* Intentionally in reverse order so we iterate over the muxed
|
|
|
|
* IRQ first.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < SCIx_NR_IRQS; i++) {
|
|
|
|
int irq = port->irqs[i];
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/*
|
|
|
|
* Certain port types won't support all of the available
|
|
|
|
* interrupt sources.
|
|
|
|
*/
|
|
|
|
if (unlikely(irq < 0))
|
|
|
|
continue;
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
free_irq(port->irqs[i], port);
|
|
|
|
kfree(port->irqstr[i]);
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
if (SCIx_IRQ_IS_MUXED(port)) {
|
|
|
|
/* If there's only one IRQ, we're done. */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static unsigned int sci_tx_empty(struct uart_port *port)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2015-09-18 11:08:24 +00:00
|
|
|
unsigned short status = serial_port_in(port, SCxSR);
|
|
|
|
unsigned short in_tx_fifo = sci_txfill(port);
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2016-06-03 10:00:10 +00:00
|
|
|
static void sci_set_rts(struct uart_port *port, bool state)
|
|
|
|
{
|
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
|
|
|
|
u16 data = serial_port_in(port, SCPDR);
|
|
|
|
|
|
|
|
/* Active low */
|
|
|
|
if (state)
|
|
|
|
data &= ~SCPDR_RTSD;
|
|
|
|
else
|
|
|
|
data |= SCPDR_RTSD;
|
|
|
|
serial_port_out(port, SCPDR, data);
|
|
|
|
|
|
|
|
/* RTS# is output */
|
|
|
|
serial_port_out(port, SCPCR,
|
|
|
|
serial_port_in(port, SCPCR) | SCPCR_RTSC);
|
|
|
|
} else if (sci_getreg(port, SCSPTR)->size) {
|
|
|
|
u16 ctrl = serial_port_in(port, SCSPTR);
|
|
|
|
|
|
|
|
/* Active low */
|
|
|
|
if (state)
|
|
|
|
ctrl &= ~SCSPTR_RTSDT;
|
|
|
|
else
|
|
|
|
ctrl |= SCSPTR_RTSDT;
|
|
|
|
serial_port_out(port, SCSPTR, ctrl);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool sci_get_cts(struct uart_port *port)
|
|
|
|
{
|
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
|
|
|
|
/* Active low */
|
|
|
|
return !(serial_port_in(port, SCPDR) & SCPDR_CTSD);
|
|
|
|
} else if (sci_getreg(port, SCSPTR)->size) {
|
|
|
|
/* Active low */
|
|
|
|
return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT);
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/*
|
|
|
|
* Modem control is a bit of a mixed bag for SCI(F) ports. Generally
|
|
|
|
* CTS/RTS is supported in hardware by at least one port and controlled
|
|
|
|
* via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
|
|
|
|
* handled via the ->init_pins() op, which is a bit of a one-way street,
|
|
|
|
* lacking any ability to defer pin control -- this will later be
|
|
|
|
* converted over to the GPIO framework).
|
|
|
|
*
|
|
|
|
* Other modes (such as loopback) are supported generically on certain
|
|
|
|
* port types, but not others. For these it's sufficient to test for the
|
|
|
|
* existence of the support register and simply ignore the port type.
|
|
|
|
*/
|
|
|
|
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2016-06-03 10:00:04 +00:00
|
|
|
struct sci_port *s = to_sci_port(port);
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
if (mctrl & TIOCM_LOOP) {
|
|
|
|
const struct plat_sci_reg *reg;
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/*
|
|
|
|
* Standard loopback mode for SCFCR ports.
|
|
|
|
*/
|
|
|
|
reg = sci_getreg(port, SCFCR);
|
|
|
|
if (reg->size)
|
|
|
|
serial_port_out(port, SCFCR,
|
|
|
|
serial_port_in(port, SCFCR) |
|
|
|
|
SCFCR_LOOP);
|
|
|
|
}
|
2016-06-03 10:00:04 +00:00
|
|
|
|
|
|
|
mctrl_gpio_set(s->gpios, mctrl);
|
2016-06-03 10:00:10 +00:00
|
|
|
|
2017-01-11 14:43:39 +00:00
|
|
|
if (!s->has_rtscts)
|
2016-06-03 10:00:10 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (!(mctrl & TIOCM_RTS)) {
|
|
|
|
/* Disable Auto RTS */
|
|
|
|
serial_port_out(port, SCFCR,
|
|
|
|
serial_port_in(port, SCFCR) & ~SCFCR_MCE);
|
|
|
|
|
|
|
|
/* Clear RTS */
|
|
|
|
sci_set_rts(port, 0);
|
|
|
|
} else if (s->autorts) {
|
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
|
|
|
|
/* Enable RTS# pin function */
|
|
|
|
serial_port_out(port, SCPCR,
|
|
|
|
serial_port_in(port, SCPCR) & ~SCPCR_RTSC);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable Auto RTS */
|
|
|
|
serial_port_out(port, SCFCR,
|
|
|
|
serial_port_in(port, SCFCR) | SCFCR_MCE);
|
|
|
|
} else {
|
|
|
|
/* Set RTS */
|
|
|
|
sci_set_rts(port, 1);
|
|
|
|
}
|
2015-09-18 11:08:24 +00:00
|
|
|
}
|
2011-01-13 06:06:28 +00:00
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
static unsigned int sci_get_mctrl(struct uart_port *port)
|
|
|
|
{
|
2016-06-03 10:00:04 +00:00
|
|
|
struct sci_port *s = to_sci_port(port);
|
|
|
|
struct mctrl_gpios *gpios = s->gpios;
|
|
|
|
unsigned int mctrl = 0;
|
|
|
|
|
|
|
|
mctrl_gpio_get(gpios, &mctrl);
|
|
|
|
|
2015-09-18 11:08:24 +00:00
|
|
|
/*
|
|
|
|
* CTS/RTS is handled in hardware when supported, while nothing
|
2016-06-03 10:00:10 +00:00
|
|
|
* else is wired up.
|
2015-09-18 11:08:24 +00:00
|
|
|
*/
|
2016-06-03 10:00:10 +00:00
|
|
|
if (s->autorts) {
|
|
|
|
if (sci_get_cts(port))
|
|
|
|
mctrl |= TIOCM_CTS;
|
|
|
|
} else if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS))) {
|
2016-06-03 10:00:04 +00:00
|
|
|
mctrl |= TIOCM_CTS;
|
2016-06-03 10:00:10 +00:00
|
|
|
}
|
2016-06-03 10:00:04 +00:00
|
|
|
if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR)))
|
|
|
|
mctrl |= TIOCM_DSR;
|
|
|
|
if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD)))
|
|
|
|
mctrl |= TIOCM_CAR;
|
|
|
|
|
|
|
|
return mctrl;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sci_enable_ms(struct uart_port *port)
|
|
|
|
{
|
|
|
|
mctrl_gpio_enable_ms(to_sci_port(port)->gpios);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
static void sci_break_ctl(struct uart_port *port, int break_state)
{
	unsigned short scscr, scsptr;
	unsigned long flags;

	/* check whether the port has SCSPTR */
	if (!sci_getreg(port, SCSPTR)->size) {
		/*
		 * Not supported by hardware. Most parts couple break and rx
		 * interrupts together, with break detection always enabled.
		 */
		return;
	}

	spin_lock_irqsave(&port->lock, flags);
	scsptr = serial_port_in(port, SCSPTR);
	scscr = serial_port_in(port, SCSCR);

	if (break_state == -1) {
		scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
		scscr &= ~SCSCR_TE;
	} else {
		scsptr = (scsptr | SCSPTR_SPB2DT) & ~SCSPTR_SPB2IO;
		scscr |= SCSCR_TE;
	}

	serial_port_out(port, SCSPTR, scsptr);
	serial_port_out(port, SCSCR, scscr);
	spin_unlock_irqrestore(&port->lock, flags);
}
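/*
 * In other words: asserting a break drives the TxD line low by switching
 * the SPB2 pin to output (SPB2IO set, SPB2DT cleared) with the transmitter
 * disabled (TE cleared); deasserting it raises SPB2DT, releases SPB2IO and
 * re-enables TE.
 */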
|
|
|
|
|
|
|
|
static int sci_startup(struct uart_port *port)
|
|
|
|
{
|
2009-01-21 15:14:38 +00:00
|
|
|
struct sci_port *s = to_sci_port(port);
|
2011-01-19 08:30:53 +00:00
|
|
|
int ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-03-02 02:39:15 +00:00
|
|
|
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
|
|
|
|
|
2017-02-27 06:56:31 +00:00
|
|
|
sci_request_dma(port);
|
|
|
|
|
2011-01-19 08:30:53 +00:00
|
|
|
ret = sci_request_irq(s);
|
2017-02-27 06:56:31 +00:00
|
|
|
if (unlikely(ret < 0)) {
|
|
|
|
sci_free_dma(port);
|
2011-01-19 08:30:53 +00:00
|
|
|
return ret;
|
2017-02-27 06:56:31 +00:00
|
|
|
}
|
2011-01-19 08:30:53 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sci_shutdown(struct uart_port *port)
|
|
|
|
{
|
2009-01-21 15:14:38 +00:00
|
|
|
struct sci_port *s = to_sci_port(port);
|
2012-11-16 01:54:49 +00:00
|
|
|
unsigned long flags;
|
2016-06-26 09:20:21 +00:00
|
|
|
u16 scr;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-03-02 02:39:15 +00:00
|
|
|
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
|
|
|
|
|
2016-06-03 10:00:10 +00:00
|
|
|
s->autorts = false;
|
2016-06-03 10:00:04 +00:00
|
|
|
mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
|
|
|
|
|
2012-11-16 01:54:49 +00:00
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
sci_stop_rx(port);
|
2005-08-31 09:12:14 +00:00
|
|
|
sci_stop_tx(port);
|
2017-09-29 13:08:53 +00:00
|
|
|
/*
|
|
|
|
* Stop RX and TX, disable related interrupts, keep clock source
|
|
|
|
* and HSCIF TOT bits
|
|
|
|
*/
|
2016-06-26 09:20:21 +00:00
|
|
|
scr = serial_port_in(port, SCSCR);
|
2017-09-29 13:08:53 +00:00
|
|
|
serial_port_out(port, SCSCR, scr &
|
|
|
|
(SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
|
2012-11-16 01:54:49 +00:00
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
2011-01-19 08:30:53 +00:00
|
|
|
|
2015-09-18 11:08:28 +00:00
|
|
|
#ifdef CONFIG_SERIAL_SH_SCI_DMA
|
|
|
|
if (s->chan_rx) {
|
|
|
|
dev_dbg(port->dev, "%s(%d) deleting rx_timer\n", __func__,
|
|
|
|
port->line);
|
|
|
|
del_timer_sync(&s->rx_timer);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
sci_free_irq(s);
|
2017-02-27 06:56:31 +00:00
|
|
|
sci_free_dma(port);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
static int sci_sck_calc(struct sci_port *s, unsigned int bps,
			unsigned int *srr)
{
	unsigned long freq = s->clk_rates[SCI_SCK];
	int err, min_err = INT_MAX;
	unsigned int sr;

	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	for_each_sr(sr, s) {
		err = DIV_ROUND_CLOSEST(freq, sr) - bps;
		if (abs(err) >= abs(min_err))
			continue;

		min_err = err;
		*srr = sr - 1;

		if (!err)
			break;
	}

	dev_dbg(s->port.dev, "SCK: %u%+d bps using SR %u\n", bps, min_err,
		*srr + 1);
	return min_err;
}
static int sci_brg_calc(struct sci_port *s, unsigned int bps,
			unsigned long freq, unsigned int *dlr,
			unsigned int *srr)
{
	int err, min_err = INT_MAX;
	unsigned int sr, dl;

	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	for_each_sr(sr, s) {
		dl = DIV_ROUND_CLOSEST(freq, sr * bps);
		dl = clamp(dl, 1U, 65535U);

		err = DIV_ROUND_CLOSEST(freq, sr * dl) - bps;
		if (abs(err) >= abs(min_err))
			continue;

		min_err = err;
		*dlr = dl;
		*srr = sr - 1;

		if (!err)
			break;
	}

	dev_dbg(s->port.dev, "BRG: %u%+d bps using DL %u SR %u\n", bps,
		min_err, *dlr, *srr + 1);
	return min_err;
}
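/*
 * Worked example (hypothetical numbers): with a 14.7456 MHz SCIF_CLK on a
 * plain SCIF (sampling rate 16, freq doubled to 29.4912 MHz) and a target
 * of 115200 bps:
 *   dl  = DIV_ROUND_CLOSEST(29491200, 16 * 115200) = 16
 *   err = DIV_ROUND_CLOSEST(29491200, 16 * 16) - 115200 = 0
 * i.e. DL = 16 hits 115200 bps exactly.
 */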
/* calculate sample rate, BRR, and clock select */
static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
			  unsigned int *brr, unsigned int *srr,
			  unsigned int *cks)
{
	unsigned long freq = s->clk_rates[SCI_FCK];
	unsigned int sr, br, prediv, scrate, c;
	int err, min_err = INT_MAX;

	if (s->port.type != PORT_HSCIF)
		freq *= 2;

	/*
	 * Find the combination of sample rate and clock select with the
	 * smallest deviation from the desired baud rate.
	 * Prefer high sample rates to maximise the receive margin.
	 *
	 * M: Receive margin (%)
	 * N: Ratio of bit rate to clock (N = sampling rate)
	 * D: Clock duty (D = 0 to 1.0)
	 * L: Frame length (L = 9 to 12)
	 * F: Absolute value of clock frequency deviation
	 *
	 * M = |(0.5 - 1 / 2 * N) - ((L - 0.5) * F) -
	 *     (|D - 0.5| / N * (1 + F))|
	 * NOTE: Usually, D is treated as 0.5 and F as 0 in this calculation.
	 */
	for_each_sr(sr, s) {
		for (c = 0; c <= 3; c++) {
			/* integerized formulas from HSCIF documentation */
			prediv = sr * (1 << (2 * c + 1));

			/*
			 * We need to calculate:
			 *
			 *     br = freq / (prediv * bps) clamped to [1..256]
			 *     err = freq / (br * prediv) - bps
			 *
			 * Watch out for overflow when calculating the desired
			 * sampling clock rate!
			 */
			if (bps > UINT_MAX / prediv)
				break;

			scrate = prediv * bps;
			br = DIV_ROUND_CLOSEST(freq, scrate);
			br = clamp(br, 1U, 256U);

			err = DIV_ROUND_CLOSEST(freq, br * prediv) - bps;
			if (abs(err) >= abs(min_err))
				continue;

			min_err = err;
			*brr = br - 1;
			*srr = sr - 1;
			*cks = c;

			if (!err)
				goto found;
		}
	}

found:
	dev_dbg(s->port.dev, "BRR: %u%+d bps using N %u SR %u cks %u\n", bps,
		min_err, *brr, *srr + 1, *cks);
	return min_err;
}
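/*
 * Worked example (hypothetical numbers): with a 48 MHz functional clock on
 * a plain SCIF (sampling rate 16, freq doubled to 96 MHz) and a target of
 * 115200 bps, the first divider setting (c = 0) gives:
 *   prediv = 16 * 2^1 = 32
 *   br     = DIV_ROUND_CLOSEST(96000000, 32 * 115200) = 26
 *   err    = DIV_ROUND_CLOSEST(96000000, 26 * 32) - 115200 = +185
 * so BRR = 25, CKS = 0, with roughly +0.16% deviation.
 */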
static void sci_reset(struct uart_port *port)
{
	const struct plat_sci_reg *reg;
	unsigned int status;
	struct sci_port *s = to_sci_port(port);

	serial_port_out(port, SCSCR, s->hscif_tot);	/* TE=0, RE=0, CKE1=0 */

	reg = sci_getreg(port, SCFCR);
	if (reg->size)
		serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);

	sci_clear_SCxSR(port,
			SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
			SCxSR_BREAK_CLEAR(port));
	if (sci_getreg(port, SCLSR)->size) {
		status = serial_port_in(port, SCLSR);
		status &= ~(SCLSR_TO | SCLSR_ORER);
		serial_port_out(port, SCLSR, status);
	}

	if (s->rx_trigger > 1) {
		if (s->rx_fifo_timeout) {
			scif_set_rtrg(port, 1);
treewide: setup_timer() -> timer_setup()
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-16 21:43:17 +00:00
			timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
		} else {
			if (port->type == PORT_SCIFA ||
			    port->type == PORT_SCIFB)
				scif_set_rtrg(port, 1);
			else
				scif_set_rtrg(port, s->rx_trigger);
		}
	}
}
|
|
|
|
|
2006-12-08 10:38:45 +00:00
|
|
|
static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
|
|
|
|
struct ktermios *old)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2017-02-03 10:38:18 +00:00
|
|
|
unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i, bits;
|
2015-11-18 10:25:53 +00:00
|
|
|
unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0;
|
|
|
|
unsigned int brr1 = 255, cks1 = 0, srr1 = 15, dl1 = 0;
|
2009-06-24 08:53:33 +00:00
|
|
|
struct sci_port *s = to_sci_port(port);
|
2015-08-21 18:02:33 +00:00
|
|
|
const struct plat_sci_reg *reg;
|
2015-10-26 08:58:16 +00:00
|
|
|
int min_err = INT_MAX, err;
|
|
|
|
unsigned long max_freq = 0;
|
|
|
|
int best_clk = -1;
|
2017-11-02 10:14:55 +00:00
|
|
|
unsigned long flags;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2014-07-14 07:10:00 +00:00
|
|
|
if ((termios->c_cflag & CSIZE) == CS7)
|
|
|
|
smr_val |= SCSMR_CHR;
|
|
|
|
if (termios->c_cflag & PARENB)
|
|
|
|
smr_val |= SCSMR_PE;
|
|
|
|
if (termios->c_cflag & PARODD)
|
|
|
|
smr_val |= SCSMR_PE | SCSMR_ODD;
|
|
|
|
if (termios->c_cflag & CSTOPB)
|
|
|
|
smr_val |= SCSMR_STOP;
|
|
|
|
|
2009-12-22 03:37:28 +00:00
|
|
|
/*
|
|
|
|
* earlyprintk comes here early on with port->uartclk set to zero.
|
|
|
|
* the clock framework is not up and running at this point so here
|
|
|
|
* we assume that 115200 is the maximum baud rate. please note that
|
|
|
|
* the baud rate is not programmed during earlyprintk - it is assumed
|
|
|
|
* that the previous boot loader has enabled required clocks and
|
|
|
|
* setup the baud rate generator hardware for us already.
|
|
|
|
*/
|
2015-10-26 08:58:16 +00:00
|
|
|
if (!port->uartclk) {
|
|
|
|
baud = uart_get_baud_rate(port, termios, old, 0, 115200);
|
|
|
|
goto done;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-10-26 08:58:16 +00:00
|
|
|
for (i = 0; i < SCI_NUM_CLKS; i++)
|
|
|
|
max_freq = max(max_freq, s->clk_rates[i]);
|
|
|
|
|
2016-01-04 13:45:21 +00:00
|
|
|
baud = uart_get_baud_rate(port, termios, old, 0, max_freq / min_sr(s));
|
2015-10-26 08:58:16 +00:00
|
|
|
if (!baud)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* There can be multiple sources for the sampling clock. Find the one
|
|
|
|
* that gives us the smallest deviation from the desired baud rate.
|
|
|
|
*/
|
|
|
|
|
2015-11-18 10:12:26 +00:00
|
|
|
/* Optional Undivided External Clock */
|
|
|
|
if (s->clk_rates[SCI_SCK] && port->type != PORT_SCIFA &&
|
|
|
|
port->type != PORT_SCIFB) {
|
|
|
|
err = sci_sck_calc(s, baud, &srr1);
|
|
|
|
if (abs(err) < abs(min_err)) {
|
|
|
|
best_clk = SCI_SCK;
|
|
|
|
scr_val = SCSCR_CKE1;
|
|
|
|
sccks = SCCKS_CKS;
|
|
|
|
min_err = err;
|
|
|
|
srr = srr1;
|
|
|
|
if (!err)
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-18 10:25:53 +00:00
|
|
|
/* Optional BRG Frequency Divided External Clock */
|
|
|
|
if (s->clk_rates[SCI_SCIF_CLK] && sci_getreg(port, SCDL)->size) {
|
|
|
|
err = sci_brg_calc(s, baud, s->clk_rates[SCI_SCIF_CLK], &dl1,
|
|
|
|
&srr1);
|
|
|
|
if (abs(err) < abs(min_err)) {
|
|
|
|
best_clk = SCI_SCIF_CLK;
|
|
|
|
scr_val = SCSCR_CKE1;
|
|
|
|
sccks = 0;
|
|
|
|
min_err = err;
|
|
|
|
dl = dl1;
|
|
|
|
srr = srr1;
|
|
|
|
if (!err)
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Optional BRG Frequency Divided Internal Clock */
|
|
|
|
if (s->clk_rates[SCI_BRG_INT] && sci_getreg(port, SCDL)->size) {
|
|
|
|
err = sci_brg_calc(s, baud, s->clk_rates[SCI_BRG_INT], &dl1,
|
|
|
|
&srr1);
|
|
|
|
if (abs(err) < abs(min_err)) {
|
|
|
|
best_clk = SCI_BRG_INT;
|
|
|
|
scr_val = SCSCR_CKE1;
|
|
|
|
sccks = SCCKS_XIN;
|
|
|
|
min_err = err;
|
|
|
|
dl = dl1;
|
|
|
|
srr = srr1;
|
|
|
|
if (!min_err)
|
|
|
|
goto done;
|
2013-05-31 15:57:01 +00:00
|
|
|
}
|
|
|
|
}
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2015-10-26 08:58:16 +00:00
|
|
|
/* Divided Functional Clock using standard Bit Rate Register */
|
|
|
|
err = sci_scbrr_calc(s, baud, &brr1, &srr1, &cks1);
|
|
|
|
if (abs(err) < abs(min_err)) {
|
|
|
|
best_clk = SCI_FCK;
|
2015-11-18 10:12:26 +00:00
|
|
|
scr_val = 0;
|
2015-10-26 08:58:16 +00:00
|
|
|
min_err = err;
|
|
|
|
brr = brr1;
|
|
|
|
srr = srr1;
|
|
|
|
cks = cks1;
|
|
|
|
}
|
|
|
|
|
|
|
|
done:
|
|
|
|
if (best_clk >= 0)
|
|
|
|
dev_dbg(port->dev, "Using clk %pC for %u%+d bps\n",
|
|
|
|
s->clks[best_clk], baud, min_err);
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2011-06-28 04:55:31 +00:00
|
|
|
sci_port_enable(s);
|
2011-03-03 08:04:42 +00:00
|
|
|
|
2015-11-18 10:12:26 +00:00
|
|
|
/*
|
|
|
|
* Program the optional External Baud Rate Generator (BRG) first.
|
|
|
|
* It controls the mux to select (H)SCK or frequency divided clock.
|
|
|
|
*/
|
2015-11-18 10:25:53 +00:00
|
|
|
if (best_clk >= 0 && sci_getreg(port, SCCKS)->size) {
|
|
|
|
serial_port_out(port, SCDL, dl);
|
2015-11-18 10:12:26 +00:00
|
|
|
serial_port_out(port, SCCKS, sccks);
|
2015-11-18 10:25:53 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2017-11-02 10:14:55 +00:00
|
|
|
spin_lock_irqsave(&port->lock, flags);
|
|
|
|
|
2011-08-03 03:47:36 +00:00
|
|
|
sci_reset(port);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
uart_update_timeout(port, termios->c_cflag, baud);
|
|
|
|
|
2015-10-26 08:58:16 +00:00
|
|
|
if (best_clk >= 0) {
|
serial: sh-sci: Add support for SCIFA/SCIFB variable sampling rates
Add support for sparse variable sampling rates on SCIFA and SCIFB.
According to the datasheet, sampling rate 1/5 needs a small quirk to
avoid corrupting the first byte received.
This increases the range and accuracy of supported baud rates.
E.g. on r8a7791/koelsch:
- Supports now 134, 150, and standard 500000-4000000 bps,
- Perfect match for 134, 150, 500000, 1000000, 2000000, and 4000000
bps,
- Accuracy has increased for most standard bps values.
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2016-01-04 13:45:22 +00:00
|
|
|
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
|
|
|
|
switch (srr + 1) {
|
|
|
|
case 5: smr_val |= SCSMR_SRC_5; break;
|
|
|
|
case 7: smr_val |= SCSMR_SRC_7; break;
|
|
|
|
case 11: smr_val |= SCSMR_SRC_11; break;
|
|
|
|
case 13: smr_val |= SCSMR_SRC_13; break;
|
|
|
|
case 16: smr_val |= SCSMR_SRC_16; break;
|
|
|
|
case 17: smr_val |= SCSMR_SRC_17; break;
|
|
|
|
case 19: smr_val |= SCSMR_SRC_19; break;
|
|
|
|
case 27: smr_val |= SCSMR_SRC_27; break;
|
|
|
|
}
|
2015-10-26 08:58:16 +00:00
|
|
|
smr_val |= cks;
|
2017-09-29 13:08:53 +00:00
|
|
|
serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
|
2015-10-26 08:58:16 +00:00
|
|
|
serial_port_out(port, SCSMR, smr_val);
|
|
|
|
serial_port_out(port, SCBRR, brr);
|
|
|
|
if (sci_getreg(port, HSSRR)->size)
|
2013-05-31 15:57:01 +00:00
|
|
|
serial_port_out(port, HSSRR, srr | HSCIF_SRE);
|
2015-10-26 08:58:16 +00:00
|
|
|
|
|
|
|
/* Wait one bit interval */
|
|
|
|
udelay((1000000 + (baud - 1)) / baud);
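		/* e.g. at 115200 bps this is (1000000 + 115199) / 115200 = 9 us, one bit time */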
|
|
|
|
} else {
|
|
|
|
/* Don't touch the bit rate configuration */
|
|
|
|
scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
|
2016-01-04 13:45:19 +00:00
|
|
|
smr_val |= serial_port_in(port, SCSMR) &
|
|
|
|
(SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
|
2017-09-29 13:08:53 +00:00
|
|
|
serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
|
2012-11-16 01:52:49 +00:00
|
|
|
serial_port_out(port, SCSMR, smr_val);
|
2015-10-26 08:58:16 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-12-16 11:07:27 +00:00
|
|
|
sci_init_pins(port, termios->c_cflag);
|
2011-11-24 09:35:49 +00:00
|
|
|
|
2016-06-03 10:00:10 +00:00
|
|
|
port->status &= ~UPSTAT_AUTOCTS;
|
|
|
|
s->autorts = false;
|
2011-12-02 10:02:06 +00:00
|
|
|
reg = sci_getreg(port, SCFCR);
|
|
|
|
if (reg->size) {
|
2012-03-30 10:50:15 +00:00
|
|
|
unsigned short ctrl = serial_port_in(port, SCFCR);
|
2011-11-24 09:35:49 +00:00
|
|
|
|
2016-06-03 10:00:10 +00:00
|
|
|
if ((port->flags & UPF_HARD_FLOW) &&
|
|
|
|
(termios->c_cflag & CRTSCTS)) {
|
|
|
|
/* There is no CTS interrupt to restart the hardware */
|
|
|
|
port->status |= UPSTAT_AUTOCTS;
|
|
|
|
/* MCE is enabled when RTS is raised */
|
|
|
|
s->autorts = true;
|
2011-12-02 08:44:50 +00:00
|
|
|
}
|
2011-12-02 10:02:06 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* As we've done a sci_reset() above, ensure we don't
|
|
|
|
* interfere with the FIFOs while toggling MCE. As the
|
|
|
|
* reset values could still be set, simply mask them out.
|
|
|
|
*/
|
|
|
|
ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
|
|
|
|
|
2012-03-30 10:50:15 +00:00
|
|
|
serial_port_out(port, SCFCR, ctrl);
|
2011-11-24 09:35:49 +00:00
|
|
|
}
|
2017-03-28 09:13:45 +00:00
|
|
|
if (port->flags & UPF_HARD_FLOW) {
|
|
|
|
/* Refresh (Auto) RTS */
|
|
|
|
sci_set_mctrl(port, port->mctrl);
|
|
|
|
}
|
2006-02-01 11:06:06 +00:00
|
|
|
|
2017-01-11 14:43:23 +00:00
|
|
|
scr_val |= SCSCR_RE | SCSCR_TE |
|
|
|
|
(s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0));
|
2017-09-29 13:08:53 +00:00
|
|
|
serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
|
2016-01-04 13:45:22 +00:00
|
|
|
if ((srr + 1 == 5) &&
|
|
|
|
(port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
|
|
|
|
/*
|
|
|
|
* In asynchronous mode, when the sampling rate is 1/5, first
|
|
|
|
* received data may become invalid on some SCIFA and SCIFB.
|
|
|
|
* To avoid this problem wait more than 1 serial data time (1
|
|
|
|
* bit time x serial data number) after setting SCSCR.RE = 1.
|
|
|
|
*/
|
|
|
|
udelay(DIV_ROUND_UP(10 * 1000000, baud));
|
|
|
|
}
	/*
	 * Calculate delay for 2 DMA buffers (4 FIFO).
	 * See serial_core.c::uart_update_timeout().
	 * With 10 bits (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above
	 * function calculates 1 jiffie for the data plus 5 jiffies for the
	 * "slop(e)." Then below we calculate 5 jiffies (20ms) for 2 DMA
	 * buffers (4 FIFO sizes), but when performing a faster transfer, the
	 * value obtained by this formula is too small. Therefore, if the value
	 * is smaller than 20ms, use 20ms as the timeout value for DMA.
	 */
	/* byte size and parity */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		bits = 7;
		break;
	case CS6:
		bits = 8;
		break;
	case CS7:
		bits = 9;
		break;
	default:
		bits = 10;
		break;
	}

	if (termios->c_cflag & CSTOPB)
		bits++;
	if (termios->c_cflag & PARENB)
		bits++;

	s->rx_frame = (100 * bits * HZ) / (baud / 10);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
	s->rx_timeout = DIV_ROUND_UP(s->buf_len_rx * 2 * s->rx_frame, 1000);
	if (s->rx_timeout < msecs_to_jiffies(20))
		s->rx_timeout = msecs_to_jiffies(20);
#endif
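	/*
	 * Worked example (hypothetical numbers): CS8 with no parity and one
	 * stop bit gives bits = 10; with HZ = 250 and 115200 bps:
	 *   rx_frame   = (100 * 10 * 250) / (115200 / 10) = 21   (1/1000 jiffies)
	 * and, with a 64-byte DMA buffer (buf_len_rx = 64):
	 *   rx_timeout = DIV_ROUND_UP(64 * 2 * 21, 1000) = 3 jiffies (12 ms),
	 * which is below 20 ms and is therefore raised to msecs_to_jiffies(20).
	 */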
2005-04-16 22:20:36 +00:00
|
|
|
if ((termios->c_cflag & CREAD) != 0)
|
2010-03-02 02:39:15 +00:00
|
|
|
sci_start_rx(port);
|
2011-03-03 08:04:42 +00:00
|
|
|
|
2017-11-02 10:14:55 +00:00
|
|
|
spin_unlock_irqrestore(&port->lock, flags);
|
|
|
|
|
2011-06-28 04:55:31 +00:00
|
|
|
sci_port_disable(s);
|
2016-06-03 10:00:04 +00:00
|
|
|
|
|
|
|
if (UART_ENABLE_MS(port, termios->c_cflag))
|
|
|
|
sci_enable_ms(port);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
serial: sh-sci: console runtime PM support (revisit)
The commit 1ba7622094 (serial: sh-sci: console Runtime PM support,
from Magnus Damm <damm@opensource.se>, 2011-08-03), tried to support
console runtime PM, but unfortunately it didn't work for us for some
reason. We did not investigated further at that time, instead would
like to propose a different approach.
In Linux tty/serial world, to get console PM work properly, a serial
client driver does not have to maintain .runtime_suspend()/..resume()
calls itself, but can leave console power power management handling to
the serial core driver.
This patch moves the sh-sci driver in that direction.
Notes:
* There is room to optimize console runtime PM more aggressively by
maintaining additional local runtime PM calls, but as a first step
having .pm() operation would suffice.
* We still have a couple of direct calls to sci_port_enable/..disable
left in the driver. We have to live with them, because they're out
of serial core's help.
Signed-off-by: Teppei Kamijou <teppei.kamijou.yb@renesas.com>
Signed-off-by: Shinya Kuribayashi <shinya.kuribayashi.px@renesas.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2012-11-16 01:51:55 +00:00
static void sci_pm(struct uart_port *port, unsigned int state,
		   unsigned int oldstate)
{
	struct sci_port *sci_port = to_sci_port(port);

	switch (state) {
	case UART_PM_STATE_OFF:
		sci_port_disable(sci_port);
		break;
	default:
		sci_port_enable(sci_port);
		break;
	}
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
static const char *sci_type(struct uart_port *port)
|
|
|
|
{
|
|
|
|
switch (port->type) {
|
2008-11-13 09:18:35 +00:00
|
|
|
case PORT_IRDA:
|
|
|
|
return "irda";
|
|
|
|
case PORT_SCI:
|
|
|
|
return "sci";
|
|
|
|
case PORT_SCIF:
|
|
|
|
return "scif";
|
|
|
|
case PORT_SCIFA:
|
|
|
|
return "scifa";
|
2010-05-23 16:39:09 +00:00
|
|
|
case PORT_SCIFB:
|
|
|
|
return "scifb";
|
2013-05-31 15:57:01 +00:00
|
|
|
case PORT_HSCIF:
|
|
|
|
return "hscif";
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2008-09-04 09:53:58 +00:00
|
|
|
return NULL;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-01-21 06:25:36 +00:00
|
|
|
static int sci_remap_port(struct uart_port *port)
|
|
|
|
{
|
2015-05-16 14:57:31 +00:00
|
|
|
struct sci_port *sport = to_sci_port(port);
|
2011-01-21 06:25:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Nothing to do if there's already an established membase.
|
|
|
|
*/
|
|
|
|
if (port->membase)
|
|
|
|
return 0;
|
|
|
|
|
2017-01-11 14:43:24 +00:00
|
|
|
if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
|
2015-05-16 14:57:31 +00:00
|
|
|
port->membase = ioremap_nocache(port->mapbase, sport->reg_size);
|
2011-01-21 06:25:36 +00:00
|
|
|
if (unlikely(!port->membase)) {
|
|
|
|
dev_err(port->dev, "can't remap port#%d\n", port->line);
|
|
|
|
return -ENXIO;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* For the simple (and majority of) cases where we don't
|
|
|
|
* need to do any remapping, just cast the cookie
|
|
|
|
* directly.
|
|
|
|
*/
|
2014-02-05 00:56:37 +00:00
|
|
|
port->membase = (void __iomem *)(uintptr_t)port->mapbase;
|
2011-01-21 06:25:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-01-20 12:24:03 +00:00
|
|
|
static void sci_release_port(struct uart_port *port)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2015-05-16 14:57:31 +00:00
|
|
|
struct sci_port *sport = to_sci_port(port);
|
|
|
|
|
2017-01-11 14:43:24 +00:00
|
|
|
if (port->dev->of_node || (port->flags & UPF_IOREMAP)) {
|
2011-01-20 12:24:03 +00:00
|
|
|
iounmap(port->membase);
|
|
|
|
port->membase = NULL;
|
|
|
|
}
|
|
|
|
|
2015-05-16 14:57:31 +00:00
|
|
|
release_mem_region(port->mapbase, sport->reg_size);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2011-01-20 12:24:03 +00:00
|
|
|
static int sci_request_port(struct uart_port *port)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-01-20 12:24:03 +00:00
|
|
|
struct resource *res;
|
2015-05-16 14:57:31 +00:00
|
|
|
struct sci_port *sport = to_sci_port(port);
|
2011-01-21 06:25:36 +00:00
|
|
|
int ret;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-05-16 14:57:31 +00:00
|
|
|
res = request_mem_region(port->mapbase, sport->reg_size,
|
|
|
|
dev_name(port->dev));
|
|
|
|
if (unlikely(res == NULL)) {
|
|
|
|
dev_err(port->dev, "request_mem_region failed.");
|
2011-01-20 12:24:03 +00:00
|
|
|
return -EBUSY;
|
2015-05-16 14:57:31 +00:00
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-01-21 06:25:36 +00:00
|
|
|
ret = sci_remap_port(port);
|
|
|
|
if (unlikely(ret != 0)) {
|
|
|
|
release_resource(res);
|
|
|
|
return ret;
|
2008-10-01 06:46:58 +00:00
|
|
|
}
|
2011-01-20 12:24:03 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sci_config_port(struct uart_port *port, int flags)
|
|
|
|
{
|
|
|
|
if (flags & UART_CONFIG_TYPE) {
|
|
|
|
struct sci_port *sport = to_sci_port(port);
|
|
|
|
|
|
|
|
port->type = sport->cfg->type;
|
|
|
|
sci_request_port(port);
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
|
|
|
|
{
|
|
|
|
if (ser->baud_base < 2400)
|
|
|
|
/* No paper tape reader for Mitch.. */
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-09-01 17:51:35 +00:00
|
|
|
static const struct uart_ops sci_uart_ops = {
|
2005-04-16 22:20:36 +00:00
|
|
|
.tx_empty = sci_tx_empty,
|
|
|
|
.set_mctrl = sci_set_mctrl,
|
|
|
|
.get_mctrl = sci_get_mctrl,
|
|
|
|
.start_tx = sci_start_tx,
|
|
|
|
.stop_tx = sci_stop_tx,
|
|
|
|
.stop_rx = sci_stop_rx,
|
2016-06-03 10:00:04 +00:00
|
|
|
.enable_ms = sci_enable_ms,
|
2005-04-16 22:20:36 +00:00
|
|
|
.break_ctl = sci_break_ctl,
|
|
|
|
.startup = sci_startup,
|
|
|
|
.shutdown = sci_shutdown,
|
2017-04-25 18:15:35 +00:00
|
|
|
.flush_buffer = sci_flush_buffer,
|
2005-04-16 22:20:36 +00:00
|
|
|
.set_termios = sci_set_termios,
|
2012-11-16 01:51:55 +00:00
|
|
|
.pm = sci_pm,
|
2005-04-16 22:20:36 +00:00
|
|
|
.type = sci_type,
|
|
|
|
.release_port = sci_release_port,
|
|
|
|
.request_port = sci_request_port,
|
|
|
|
.config_port = sci_config_port,
|
|
|
|
.verify_port = sci_verify_port,
|
2008-12-11 10:06:43 +00:00
|
|
|
#ifdef CONFIG_CONSOLE_POLL
|
|
|
|
.poll_get_char = sci_poll_get_char,
|
|
|
|
.poll_put_char = sci_poll_put_char,
|
|
|
|
#endif
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2015-09-14 12:14:23 +00:00
|
|
|
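/*
 * Look up the clocks a port may use: the functional clock "fck" (with
 * "sci_ick" and "peripheral_clk" as legacy fallbacks), the optional
 * external clock "sck" ("hsck" on HSCIF), and the optional BRG clocks
 * "brg_int" and "scif_clk".  Clocks that are not provided are left as
 * NULL in sci_port->clks[].
 */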
static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
|
|
|
|
{
|
2015-10-26 08:58:16 +00:00
|
|
|
const char *clk_names[] = {
|
|
|
|
[SCI_FCK] = "fck",
|
2015-11-18 10:12:26 +00:00
|
|
|
[SCI_SCK] = "sck",
|
2015-11-18 10:25:53 +00:00
|
|
|
[SCI_BRG_INT] = "brg_int",
|
|
|
|
[SCI_SCIF_CLK] = "scif_clk",
|
2015-10-26 08:58:16 +00:00
|
|
|
};
|
|
|
|
struct clk *clk;
|
|
|
|
unsigned int i;
|
2015-09-14 12:14:23 +00:00
|
|
|
|
2015-11-18 10:12:26 +00:00
|
|
|
if (sci_port->cfg->type == PORT_HSCIF)
|
|
|
|
clk_names[SCI_SCK] = "hsck";
|
|
|
|
|
2015-10-26 08:58:16 +00:00
|
|
|
for (i = 0; i < SCI_NUM_CLKS; i++) {
|
|
|
|
clk = devm_clk_get(dev, clk_names[i]);
|
|
|
|
if (PTR_ERR(clk) == -EPROBE_DEFER)
|
|
|
|
return -EPROBE_DEFER;
|
2015-09-14 12:14:23 +00:00
|
|
|
|
2015-10-26 08:58:16 +00:00
|
|
|
if (IS_ERR(clk) && i == SCI_FCK) {
|
|
|
|
/*
|
|
|
|
* "fck" used to be called "sci_ick", and we need to
|
|
|
|
* maintain DT backward compatibility.
|
|
|
|
*/
|
|
|
|
clk = devm_clk_get(dev, "sci_ick");
|
|
|
|
if (PTR_ERR(clk) == -EPROBE_DEFER)
|
|
|
|
return -EPROBE_DEFER;
|
2015-09-14 12:14:23 +00:00
|
|
|
|
2015-10-26 08:58:16 +00:00
|
|
|
if (!IS_ERR(clk))
|
|
|
|
goto found;
|
2015-09-14 12:14:23 +00:00
|
|
|
|
2015-10-26 08:58:16 +00:00
|
|
|
/*
|
|
|
|
* Not all SH platforms declare a clock lookup entry
|
|
|
|
* for SCI devices, in which case we need to get the
|
|
|
|
* global "peripheral_clk" clock.
|
|
|
|
*/
|
|
|
|
clk = devm_clk_get(dev, "peripheral_clk");
|
|
|
|
if (!IS_ERR(clk))
|
|
|
|
goto found;
|
|
|
|
|
|
|
|
dev_err(dev, "failed to get %s (%ld)\n", clk_names[i],
|
|
|
|
PTR_ERR(clk));
|
|
|
|
return PTR_ERR(clk);
|
|
|
|
}
|
|
|
|
|
|
|
|
found:
|
|
|
|
if (IS_ERR(clk))
|
|
|
|
dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
|
|
|
|
PTR_ERR(clk));
|
|
|
|
else
|
|
|
|
dev_dbg(dev, "clk %s is %pC rate %pCr\n", clk_names[i],
|
|
|
|
clk, clk);
|
|
|
|
sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
|
|
|
|
}
|
|
|
|
return 0;
|
2015-09-14 12:14:23 +00:00
|
|
|
}
|
|
|
|
|
2017-01-11 14:43:35 +00:00
|
|
|
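/*
 * Map a port to its register layout description.  Platform data may name
 * the regtype explicitly; SCIx_PROBE_REGTYPE means the layout is derived
 * from the port type instead.
 */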
static const struct sci_port_params *
|
|
|
|
sci_probe_regmap(const struct plat_sci_port *cfg)
|
|
|
|
{
|
|
|
|
unsigned int regtype;
|
|
|
|
|
|
|
|
if (cfg->regtype != SCIx_PROBE_REGTYPE)
|
|
|
|
return &sci_port_params[cfg->regtype];
|
|
|
|
|
|
|
|
switch (cfg->type) {
|
|
|
|
case PORT_SCI:
|
|
|
|
regtype = SCIx_SCI_REGTYPE;
|
|
|
|
break;
|
|
|
|
case PORT_IRDA:
|
|
|
|
regtype = SCIx_IRDA_REGTYPE;
|
|
|
|
break;
|
|
|
|
case PORT_SCIFA:
|
|
|
|
regtype = SCIx_SCIFA_REGTYPE;
|
|
|
|
break;
|
|
|
|
case PORT_SCIFB:
|
|
|
|
regtype = SCIx_SCIFB_REGTYPE;
|
|
|
|
break;
|
|
|
|
case PORT_SCIF:
|
|
|
|
/*
|
|
|
|
* The SH-4 is a bit of a misnomer here, although that's
|
|
|
|
* where this particular port layout originated. This
|
|
|
|
* configuration (or some slight variation thereof)
|
|
|
|
* remains the dominant model for all SCIFs.
|
|
|
|
*/
|
|
|
|
regtype = SCIx_SH4_SCIF_REGTYPE;
|
|
|
|
break;
|
|
|
|
case PORT_HSCIF:
|
|
|
|
regtype = SCIx_HSCIF_REGTYPE;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
pr_err("Can't probe register map for given port\n");
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return &sci_port_params[regtype];
|
|
|
|
}
|
|
|
|
|
2012-11-19 18:21:50 +00:00
|
|
|
static int sci_init_single(struct platform_device *dev,
|
2013-12-06 09:59:16 +00:00
|
|
|
struct sci_port *sci_port, unsigned int index,
|
2017-01-11 14:43:35 +00:00
|
|
|
const struct plat_sci_port *p, bool early)
|
2006-09-27 07:32:13 +00:00
|
|
|
{
|
2010-03-02 02:39:15 +00:00
|
|
|
struct uart_port *port = &sci_port->port;
|
2013-12-06 09:59:16 +00:00
|
|
|
const struct resource *res;
|
|
|
|
unsigned int i;
|
2011-06-28 04:44:37 +00:00
|
|
|
int ret;
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2011-12-02 11:09:48 +00:00
|
|
|
sci_port->cfg = p;
|
|
|
|
|
2010-03-02 02:39:15 +00:00
|
|
|
port->ops = &sci_uart_ops;
|
|
|
|
port->iotype = UPIO_MEM;
|
|
|
|
port->line = index;
|
2010-01-14 23:33:20 +00:00
|
|
|
|
2013-12-06 09:59:52 +00:00
|
|
|
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
|
|
|
|
if (res == NULL)
|
|
|
|
return -ENOMEM;
|
2013-12-06 09:59:16 +00:00
|
|
|
|
2013-12-06 09:59:52 +00:00
|
|
|
port->mapbase = res->start;
|
2015-05-16 14:57:31 +00:00
|
|
|
sci_port->reg_size = resource_size(res);
|
2013-12-06 09:59:16 +00:00
|
|
|
|
2013-12-06 09:59:52 +00:00
|
|
|
for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
|
|
|
|
sci_port->irqs[i] = platform_get_irq(dev, i);
|
2013-12-06 09:59:16 +00:00
|
|
|
|
2013-12-06 09:59:52 +00:00
|
|
|
/* The SCI generates several interrupts. They can be muxed together or
|
|
|
|
* connected to different interrupt lines. In the muxed case only one
|
|
|
|
* interrupt resource is specified. In the non-muxed case three or four
|
|
|
|
* interrupt resources are specified, as the BRI interrupt is optional.
|
|
|
|
*/
|
|
|
|
if (sci_port->irqs[0] < 0)
|
|
|
|
return -ENXIO;
|
2013-12-06 09:59:16 +00:00
|
|
|
|
2013-12-06 09:59:52 +00:00
|
|
|
if (sci_port->irqs[1] < 0) {
|
|
|
|
sci_port->irqs[1] = sci_port->irqs[0];
|
|
|
|
sci_port->irqs[2] = sci_port->irqs[0];
|
|
|
|
sci_port->irqs[3] = sci_port->irqs[0];
|
2013-12-06 09:59:16 +00:00
|
|
|
}
|
|
|
|
|
2017-01-11 14:43:35 +00:00
|
|
|
sci_port->params = sci_probe_regmap(p);
|
|
|
|
if (unlikely(sci_port->params == NULL))
|
|
|
|
return -EINVAL;
|
2017-01-11 14:43:34 +00:00
|
|
|
|
2017-02-03 10:38:17 +00:00
|
|
|
switch (p->type) {
|
|
|
|
case PORT_SCIFB:
|
|
|
|
sci_port->rx_trigger = 48;
|
|
|
|
break;
|
|
|
|
case PORT_HSCIF:
|
|
|
|
sci_port->rx_trigger = 64;
|
|
|
|
break;
|
|
|
|
case PORT_SCIFA:
|
|
|
|
sci_port->rx_trigger = 32;
|
|
|
|
break;
|
|
|
|
case PORT_SCIF:
|
|
|
|
if (p->regtype == SCIx_SH7705_SCIF_REGTYPE)
|
|
|
|
/* RX triggering not implemented for this IP */
|
|
|
|
sci_port->rx_trigger = 1;
|
|
|
|
else
|
|
|
|
sci_port->rx_trigger = 8;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
sci_port->rx_trigger = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2017-02-03 10:38:18 +00:00
|
|
|
sci_port->rx_fifo_timeout = 0;
|
2017-09-29 13:08:53 +00:00
|
|
|
sci_port->hscif_tot = 0;
|
2017-02-03 10:38:18 +00:00
|
|
|
|
2013-12-06 09:59:51 +00:00
|
|
|
/* SCIFA on sh7723 and sh7724 need a custom sampling rate that doesn't
|
|
|
|
* match the SoC datasheet; this should be investigated. Let platform
|
|
|
|
* data override the sampling rate for now.
|
2013-12-06 09:59:20 +00:00
|
|
|
*/
|
2017-01-11 14:43:36 +00:00
|
|
|
sci_port->sampling_rate_mask = p->sampling_rate
|
|
|
|
? SCI_SR(p->sampling_rate)
|
|
|
|
: sci_port->params->sampling_rate_mask;
|
2013-12-06 09:59:20 +00:00
|
|
|
|
2013-12-06 09:59:16 +00:00
|
|
|
if (!early) {
|
2015-09-14 12:14:23 +00:00
|
|
|
ret = sci_init_clocks(sci_port, &dev->dev);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2010-03-10 09:35:14 +00:00
|
|
|
|
2010-03-02 02:39:15 +00:00
|
|
|
port->dev = &dev->dev;
|
2011-04-19 10:38:25 +00:00
|
|
|
|
|
|
|
pm_runtime_enable(&dev->dev);
|
2009-12-14 10:24:42 +00:00
|
|
|
}
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2011-01-19 06:24:40 +00:00
|
|
|
port->type = p->type;
|
2017-01-11 14:43:24 +00:00
|
|
|
port->flags = UPF_FIXED_PORT | UPF_BOOT_AUTOCONF | p->flags;
|
2017-01-11 14:43:36 +00:00
|
|
|
port->fifosize = sci_port->params->fifosize;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2017-01-11 14:43:40 +00:00
|
|
|
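/*
 * Plain SCI ports space their registers either 32 or 16 bits apart
 * depending on the size of the register window; regshift scales the
 * register offsets used by the driver's register accessors accordingly.
 */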
if (port->type == PORT_SCI) {
|
|
|
|
if (sci_port->reg_size >= 0x20)
|
|
|
|
port->regshift = 2;
|
|
|
|
else
|
|
|
|
port->regshift = 1;
|
|
|
|
}
|
|
|
|
|
2011-01-19 06:24:40 +00:00
|
|
|
/*
|
2011-06-14 03:40:19 +00:00
|
|
|
* The UART port needs an IRQ value, so we peg this to the RX IRQ
|
2011-01-19 06:24:40 +00:00
|
|
|
* for the multi-IRQ ports, which is where we are primarily
|
|
|
|
* concerned with the shutdown path synchronization.
|
|
|
|
*
|
|
|
|
* For the muxed case there's nothing more to do.
|
|
|
|
*/
|
2013-12-06 09:59:16 +00:00
|
|
|
port->irq = sci_port->irqs[SCIx_RXI_IRQ];
|
2011-09-22 08:59:15 +00:00
|
|
|
port->irqflags = 0;
|
2010-03-02 02:39:15 +00:00
|
|
|
|
2011-06-14 03:40:19 +00:00
|
|
|
port->serial_in = sci_serial_in;
|
|
|
|
port->serial_out = sci_serial_out;
|
|
|
|
|
2010-03-10 09:35:14 +00:00
|
|
|
return 0;
|
2006-09-27 07:32:13 +00:00
|
|
|
}
|
|
|
|
|
2012-06-12 22:28:23 +00:00
|
|
|
static void sci_cleanup_single(struct sci_port *port)
|
|
|
|
{
|
|
|
|
pm_runtime_disable(port->port.dev);
|
|
|
|
}
|
|
|
|
|
2015-12-24 10:24:48 +00:00
|
|
|
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
|
|
|
|
defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
|
2009-01-21 15:14:06 +00:00
|
|
|
static void serial_console_putchar(struct uart_port *port, int ch)
|
|
|
|
{
|
|
|
|
sci_poll_put_char(port, ch);
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
|
|
|
* Print a string to the serial port trying not to disturb
|
|
|
|
* any possible real use of the port...
|
|
|
|
*/
|
|
|
|
static void serial_console_write(struct console *co, const char *s,
|
|
|
|
unsigned count)
|
|
|
|
{
|
2011-01-21 07:19:53 +00:00
|
|
|
struct sci_port *sci_port = &sci_ports[co->index];
|
|
|
|
struct uart_port *port = &sci_port->port;
|
2015-11-18 15:20:44 +00:00
|
|
|
unsigned short bits, ctrl, ctrl_temp;
|
serial: sh-sci: add locking to console write function to avoid SMP lockup
Symptom:
When entering suspend with Android logcat running, a printk() call
gets stuck and never returns. The issue can be observed at printk()s
on nonboot CPUs as they go offline with their interrupts disabled,
and is never seen on the boot CPU (core0 in our case).
Details:
serial_console_write() lacks appropriate spinlock handling.
In SMP systems, as long as sci_transmit_chars() is being processed
on one CPU core, serial_console_write() can get stuck on the other CPU
core(s) when it tries to access the same serial port _without_
proper locking. serial_console_write() waits for the transmit FIFO
to become empty, while sci_transmit_chars() writes data to the FIFO.
In general, peripheral interrupts are routed to the boot CPU (core0) by
the standard Linux ARM affinity settings. SCI(F) interrupts are handled
by core0, so sci_transmit_chars() is processed on core0 as well.
When logcat is running, it constantly writes enormous amounts of log
data to the kernel, so core0 can repeatedly continue to process
sci_transmit_chars() in its interrupt handler, which eventually leaves
the other CPU core(s) stuck in serial_console_write().
Looking at serial/8250.c, this is a known console write lockup issue
with SMP kernels. Fix the sh-sci driver in the same way 8250.c does.
Signed-off-by: Shinya Kuribayashi <shinya.kuribayashi.px@renesas.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2012-11-16 01:54:15 +00:00
|
|
|
unsigned long flags;
|
|
|
|
int locked = 1;
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
2015-12-24 10:24:48 +00:00
|
|
|
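/*
 * As in 8250: skip the port lock entirely for sysrq output, only trylock
 * while an oops is in progress, and take the lock normally otherwise.
 */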
#if defined(SUPPORT_SYSRQ)
|
2012-11-16 01:54:15 +00:00
|
|
|
if (port->sysrq)
|
|
|
|
locked = 0;
|
2015-12-24 10:24:48 +00:00
|
|
|
else
|
|
|
|
#endif
|
|
|
|
if (oops_in_progress)
|
2012-11-16 01:54:15 +00:00
|
|
|
locked = spin_trylock(&port->lock);
|
|
|
|
else
|
|
|
|
spin_lock(&port->lock);
|
|
|
|
|
2015-11-18 15:20:44 +00:00
|
|
|
/* first save SCSCR then disable interrupts, keep clock source */
|
2012-11-16 01:54:15 +00:00
|
|
|
ctrl = serial_port_in(port, SCSCR);
|
2017-01-11 14:43:23 +00:00
|
|
|
ctrl_temp = SCSCR_RE | SCSCR_TE |
|
|
|
|
(sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
|
2015-11-18 15:20:44 +00:00
|
|
|
(ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
|
2017-09-29 13:08:53 +00:00
|
|
|
serial_port_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
|
2008-12-11 10:06:43 +00:00
|
|
|
|
2009-01-21 15:14:30 +00:00
|
|
|
uart_console_write(port, s, count, serial_console_putchar);
|
2009-02-24 06:57:12 +00:00
|
|
|
|
|
|
|
/* wait until fifo is empty and last bit has been transmitted */
|
|
|
|
bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
|
2012-03-30 10:50:15 +00:00
|
|
|
while ((serial_port_in(port, SCxSR) & bits) != bits)
|
2009-02-24 06:57:12 +00:00
|
|
|
cpu_relax();
|
2012-11-16 01:54:15 +00:00
|
|
|
|
|
|
|
/* restore the SCSCR */
|
|
|
|
serial_port_out(port, SCSCR, ctrl);
|
|
|
|
|
|
|
|
if (locked)
|
|
|
|
spin_unlock(&port->lock);
|
|
|
|
local_irq_restore(flags);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2012-11-19 18:21:50 +00:00
|
|
|
static int serial_console_setup(struct console *co, char *options)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2009-01-21 15:14:06 +00:00
|
|
|
struct sci_port *sci_port;
|
2005-04-16 22:20:36 +00:00
|
|
|
struct uart_port *port;
|
|
|
|
int baud = 115200;
|
|
|
|
int bits = 8;
|
|
|
|
int parity = 'n';
|
|
|
|
int flow = 'n';
|
|
|
|
int ret;
|
|
|
|
|
2006-09-27 07:32:13 +00:00
|
|
|
/*
|
2011-01-21 07:19:53 +00:00
|
|
|
* Refuse to handle any bogus ports.
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2011-01-21 07:19:53 +00:00
|
|
|
if (co->index < 0 || co->index >= SCI_NPORTS)
|
2006-09-27 07:32:13 +00:00
|
|
|
return -ENODEV;
|
|
|
|
|
2011-01-21 07:19:53 +00:00
|
|
|
sci_port = &sci_ports[co->index];
|
|
|
|
port = &sci_port->port;
|
|
|
|
|
2011-02-09 03:18:46 +00:00
|
|
|
/*
|
|
|
|
* Refuse to handle uninitialized ports.
|
|
|
|
*/
|
|
|
|
if (!port->ops)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2011-01-21 06:25:36 +00:00
|
|
|
ret = sci_remap_port(port);
|
|
|
|
if (unlikely(ret != 0))
|
|
|
|
return ret;
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
if (options)
|
|
|
|
uart_parse_options(options, &baud, &parity, &bits, &flow);
|
|
|
|
|
2011-06-01 05:47:42 +00:00
|
|
|
return uart_set_options(port, co, baud, parity, bits, flow);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct console serial_console = {
|
|
|
|
.name = "ttySC",
|
2011-01-21 07:19:53 +00:00
|
|
|
.device = uart_console_device,
|
2005-04-16 22:20:36 +00:00
|
|
|
.write = serial_console_write,
|
|
|
|
.setup = serial_console_setup,
|
2007-03-08 08:27:37 +00:00
|
|
|
.flags = CON_PRINTBUFFER,
|
2005-04-16 22:20:36 +00:00
|
|
|
.index = -1,
|
2011-01-21 07:19:53 +00:00
|
|
|
.data = &sci_uart_driver,
|
2005-04-16 22:20:36 +00:00
|
|
|
};
|
|
|
|
|
2009-12-14 10:24:42 +00:00
|
|
|
static struct console early_serial_console = {
|
|
|
|
.name = "early_ttySC",
|
|
|
|
.write = serial_console_write,
|
|
|
|
.flags = CON_PRINTBUFFER,
|
2011-01-21 07:19:53 +00:00
|
|
|
.index = -1,
|
2009-12-14 10:24:42 +00:00
|
|
|
};
|
2011-01-20 15:05:48 +00:00
|
|
|
|
2009-12-14 10:24:42 +00:00
|
|
|
static char early_serial_buf[32];
|
|
|
|
|
2012-11-19 18:21:50 +00:00
|
|
|
static int sci_probe_earlyprintk(struct platform_device *pdev)
|
2011-01-20 15:05:48 +00:00
|
|
|
{
|
2017-01-11 14:43:35 +00:00
|
|
|
const struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);
|
2011-01-20 15:05:48 +00:00
|
|
|
|
|
|
|
if (early_serial_console.data)
|
|
|
|
return -EEXIST;
|
|
|
|
|
|
|
|
early_serial_console.index = pdev->id;
|
|
|
|
|
2013-12-06 09:59:16 +00:00
|
|
|
sci_init_single(pdev, &sci_ports[pdev->id], pdev->id, cfg, true);
|
2011-01-20 15:05:48 +00:00
|
|
|
|
|
|
|
serial_console_setup(&early_serial_console, early_serial_buf);
|
|
|
|
|
|
|
|
if (!strstr(early_serial_buf, "keep"))
|
|
|
|
early_serial_console.flags |= CON_BOOT;
|
|
|
|
|
|
|
|
register_console(&early_serial_console);
|
|
|
|
return 0;
|
|
|
|
}
|
2011-03-24 02:20:56 +00:00
|
|
|
|
|
|
|
#define SCI_CONSOLE (&serial_console)
|
|
|
|
|
2011-01-20 15:05:48 +00:00
|
|
|
#else
|
2012-11-19 18:21:50 +00:00
|
|
|
static inline int sci_probe_earlyprintk(struct platform_device *pdev)
|
2011-01-20 15:05:48 +00:00
|
|
|
{
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2011-03-24 02:20:56 +00:00
|
|
|
#define SCI_CONSOLE NULL
|
|
|
|
|
2015-12-24 10:24:48 +00:00
|
|
|
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE || CONFIG_SERIAL_SH_SCI_EARLYCON */
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2014-03-11 10:11:17 +00:00
|
|
|
static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2017-04-20 12:13:01 +00:00
|
|
|
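/*
 * Serializes the lazy registration of sci_uart_driver performed by
 * sci_probe_single() on the first probe.
 */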
static DEFINE_MUTEX(sci_uart_registration_lock);
|
2005-04-16 22:20:36 +00:00
|
|
|
static struct uart_driver sci_uart_driver = {
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.driver_name = "sci",
|
|
|
|
.dev_name = "ttySC",
|
|
|
|
.major = SCI_MAJOR,
|
|
|
|
.minor = SCI_MINOR_START,
|
2006-09-27 07:32:13 +00:00
|
|
|
.nr = SCI_NPORTS,
|
2005-04-16 22:20:36 +00:00
|
|
|
.cons = SCI_CONSOLE,
|
|
|
|
};
|
|
|
|
|
2009-05-08 14:48:33 +00:00
|
|
|
static int sci_remove(struct platform_device *dev)
|
2009-01-21 15:13:42 +00:00
|
|
|
{
|
2011-01-19 08:19:35 +00:00
|
|
|
struct sci_port *port = platform_get_drvdata(dev);
|
2009-01-21 15:13:42 +00:00
|
|
|
|
2011-01-19 08:19:35 +00:00
|
|
|
uart_remove_one_port(&sci_uart_driver, &port->port);
|
|
|
|
|
2012-06-12 22:28:23 +00:00
|
|
|
sci_cleanup_single(port);
|
2009-01-21 15:13:42 +00:00
|
|
|
|
2017-02-03 10:38:19 +00:00
|
|
|
if (port->port.fifosize > 1) {
|
|
|
|
sysfs_remove_file(&dev->dev.kobj,
|
|
|
|
&dev_attr_rx_fifo_trigger.attr);
|
|
|
|
}
|
2017-09-29 13:08:53 +00:00
|
|
|
if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
|
|
|
|
port->port.type == PORT_HSCIF) {
|
2017-02-03 10:38:19 +00:00
|
|
|
sysfs_remove_file(&dev->dev.kobj,
|
|
|
|
&dev_attr_rx_fifo_timeout.attr);
|
|
|
|
}
|
|
|
|
|
2009-01-21 15:13:42 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-10 15:09:23 +00:00
|
|
|
|
|
|
|
#define SCI_OF_DATA(type, regtype) (void *)((type) << 16 | (regtype))
|
|
|
|
#define SCI_OF_TYPE(data) ((unsigned long)(data) >> 16)
|
|
|
|
#define SCI_OF_REGTYPE(data) ((unsigned long)(data) & 0xffff)
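/*
 * The OF match data packs the port type into the upper 16 bits and the
 * register layout into the lower 16 bits, so a single pointer-sized value
 * describes each compatible entry below.
 */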
|
2013-12-06 09:59:54 +00:00
|
|
|
|
|
|
|
static const struct of_device_id of_sci_match[] = {
|
serial: sh-sci: Correct SCIF type on RZ/A1H
The "renesas,scif" compatible value is currently used for the SCIF
variant in all Renesas SoCs of the R-Car and RZ families. However, the
variant used in the RZ family is not the common "SH-4(A)" variant, but
the "SH-2(A) with FIFO data count register" variant, as it has the
"Serial Extension Mode Register" (SCEMR), just like on sh7203, sh7263,
sh7264, and sh7269.
Use the (already documented) SoC-specific "renesas,scif-r7s72100"
compatible value to differentiate. The "renesas,scif" compatible value
can still be used as a common denominator for SCIF variants with the
"SH-4(A)" register layout (i.e. ignoring the SCEMR register).
Note that currently both variants are treated the same, but this may
change if support for the SCEMR register is ever added.
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2015-11-10 15:16:54 +00:00
|
|
|
/* SoC-specific types */
|
|
|
|
{
|
|
|
|
.compatible = "renesas,scif-r7s72100",
|
|
|
|
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
|
|
|
|
},
|
2015-11-10 17:57:23 +00:00
|
|
|
/* Family-specific types */
|
|
|
|
{
|
|
|
|
.compatible = "renesas,rcar-gen1-scif",
|
|
|
|
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
|
|
|
|
}, {
|
|
|
|
.compatible = "renesas,rcar-gen2-scif",
|
|
|
|
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
|
|
|
|
}, {
|
|
|
|
.compatible = "renesas,rcar-gen3-scif",
|
|
|
|
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
|
|
|
|
},
|
2015-11-10 15:16:54 +00:00
|
|
|
/* Generic types */
|
2013-12-06 09:59:54 +00:00
|
|
|
{
|
|
|
|
.compatible = "renesas,scif",
|
2015-11-10 15:09:23 +00:00
|
|
|
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_REGTYPE),
|
2013-12-06 09:59:54 +00:00
|
|
|
}, {
|
|
|
|
.compatible = "renesas,scifa",
|
2015-11-10 15:09:23 +00:00
|
|
|
.data = SCI_OF_DATA(PORT_SCIFA, SCIx_SCIFA_REGTYPE),
|
2013-12-06 09:59:54 +00:00
|
|
|
}, {
|
|
|
|
.compatible = "renesas,scifb",
|
2015-11-10 15:09:23 +00:00
|
|
|
.data = SCI_OF_DATA(PORT_SCIFB, SCIx_SCIFB_REGTYPE),
|
2013-12-06 09:59:54 +00:00
|
|
|
}, {
|
|
|
|
.compatible = "renesas,hscif",
|
2015-11-10 15:09:23 +00:00
|
|
|
.data = SCI_OF_DATA(PORT_HSCIF, SCIx_HSCIF_REGTYPE),
|
2015-01-27 17:53:55 +00:00
|
|
|
}, {
|
|
|
|
.compatible = "renesas,sci",
|
2015-11-10 15:09:23 +00:00
|
|
|
.data = SCI_OF_DATA(PORT_SCI, SCIx_SCI_REGTYPE),
|
2013-12-06 09:59:54 +00:00
|
|
|
}, {
|
|
|
|
/* Terminator */
|
|
|
|
},
|
|
|
|
};
|
|
|
|
MODULE_DEVICE_TABLE(of, of_sci_match);
|
|
|
|
|
2017-01-25 14:55:49 +00:00
|
|
|
static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
|
|
|
|
unsigned int *dev_id)
|
2013-12-06 09:59:54 +00:00
|
|
|
{
|
|
|
|
struct device_node *np = pdev->dev.of_node;
|
|
|
|
struct plat_sci_port *p;
|
2017-01-11 14:43:39 +00:00
|
|
|
struct sci_port *sp;
|
2017-10-04 12:21:56 +00:00
|
|
|
const void *data;
|
2013-12-06 09:59:54 +00:00
|
|
|
int id;
|
|
|
|
|
|
|
|
if (!IS_ENABLED(CONFIG_OF) || !np)
|
|
|
|
return NULL;
|
|
|
|
|
2017-10-04 12:21:56 +00:00
|
|
|
data = of_device_get_match_data(&pdev->dev);
|
2013-12-06 09:59:54 +00:00
|
|
|
|
|
|
|
p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
|
2015-08-21 18:02:34 +00:00
|
|
|
if (!p)
|
2013-12-06 09:59:54 +00:00
|
|
|
return NULL;
|
|
|
|
|
2015-11-12 12:39:49 +00:00
|
|
|
/* Get the line number from the aliases node. */
|
2013-12-06 09:59:54 +00:00
|
|
|
id = of_alias_get_id(np, "serial");
|
|
|
|
if (id < 0) {
|
|
|
|
dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-01-11 14:43:39 +00:00
|
|
|
sp = &sci_ports[id];
|
2013-12-06 09:59:54 +00:00
|
|
|
*dev_id = id;
|
|
|
|
|
2017-10-04 12:21:56 +00:00
|
|
|
p->type = SCI_OF_TYPE(data);
|
|
|
|
p->regtype = SCI_OF_REGTYPE(data);
|
2013-12-06 09:59:54 +00:00
|
|
|
|
2017-08-13 19:11:24 +00:00
|
|
|
sp->has_rtscts = of_property_read_bool(np, "uart-has-rtscts");
|
2016-06-03 10:00:11 +00:00
|
|
|
|
2013-12-06 09:59:54 +00:00
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
2012-11-19 18:21:50 +00:00
|
|
|
static int sci_probe_single(struct platform_device *dev,
|
2009-01-21 15:13:50 +00:00
|
|
|
unsigned int index,
|
|
|
|
struct plat_sci_port *p,
|
|
|
|
struct sci_port *sciport)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Sanity check */
|
|
|
|
if (unlikely(index >= SCI_NPORTS)) {
|
2014-03-11 17:10:46 +00:00
|
|
|
dev_notice(&dev->dev, "Attempting to register port %d when only %d are available\n",
|
2009-01-21 15:13:50 +00:00
|
|
|
index+1, SCI_NPORTS);
|
2014-03-11 17:10:46 +00:00
|
|
|
dev_notice(&dev->dev, "Consider bumping CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
|
2012-06-12 22:28:24 +00:00
|
|
|
return -EINVAL;
|
2009-01-21 15:13:50 +00:00
|
|
|
}
|
|
|
|
|
2017-04-20 12:13:01 +00:00
|
|
|
mutex_lock(&sci_uart_registration_lock);
|
|
|
|
if (!sci_uart_driver.state) {
|
|
|
|
ret = uart_register_driver(&sci_uart_driver);
|
|
|
|
if (ret) {
|
|
|
|
mutex_unlock(&sci_uart_registration_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mutex_unlock(&sci_uart_registration_lock);
|
|
|
|
|
2013-12-06 09:59:16 +00:00
|
|
|
ret = sci_init_single(dev, sciport, index, p, false);
|
2010-03-10 09:35:14 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
2009-01-21 15:13:50 +00:00
|
|
|
|
2016-06-03 10:00:04 +00:00
|
|
|
sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
|
|
|
|
if (IS_ERR(sciport->gpios) && PTR_ERR(sciport->gpios) != -ENOSYS)
|
|
|
|
return PTR_ERR(sciport->gpios);
|
|
|
|
|
2017-01-11 14:43:39 +00:00
|
|
|
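/*
 * Dedicated RTS/CTS pins (UPF_HARD_FLOW) and GPIO-driven rts/cts lines
 * from mctrl_gpio are mutually exclusive; reject configurations that
 * request both.
 */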
if (sciport->has_rtscts) {
|
2016-06-03 10:00:04 +00:00
|
|
|
if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
|
|
|
|
UART_GPIO_CTS)) ||
|
|
|
|
!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
|
|
|
|
UART_GPIO_RTS))) {
|
|
|
|
dev_err(&dev->dev, "Conflicting RTS/CTS config\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2016-06-03 10:00:10 +00:00
|
|
|
sciport->port.flags |= UPF_HARD_FLOW;
|
2016-06-03 10:00:04 +00:00
|
|
|
}
|
|
|
|
|
2012-06-12 22:28:23 +00:00
|
|
|
ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
|
|
|
|
if (ret) {
|
|
|
|
sci_cleanup_single(sciport);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2009-01-21 15:13:50 +00:00
|
|
|
}
|
|
|
|
|
2012-11-19 18:21:50 +00:00
|
|
|
static int sci_probe(struct platform_device *dev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2013-12-06 09:59:54 +00:00
|
|
|
struct plat_sci_port *p;
|
|
|
|
struct sci_port *sp;
|
|
|
|
unsigned int dev_id;
|
2011-01-20 15:05:48 +00:00
|
|
|
int ret;
|
2011-01-19 08:19:35 +00:00
|
|
|
|
2011-01-20 15:05:48 +00:00
|
|
|
/*
|
|
|
|
* If we've come here via earlyprintk initialization, head off to
|
|
|
|
* the special early probe. We don't have sufficient device state
|
|
|
|
* to make it beyond this yet.
|
|
|
|
*/
|
|
|
|
if (is_early_platform_device(dev))
|
|
|
|
return sci_probe_earlyprintk(dev);
|
2009-12-14 10:24:42 +00:00
|
|
|
|
2013-12-06 09:59:54 +00:00
|
|
|
if (dev->dev.of_node) {
|
|
|
|
p = sci_parse_dt(dev, &dev_id);
|
|
|
|
if (p == NULL)
|
|
|
|
return -EINVAL;
|
|
|
|
} else {
|
|
|
|
p = dev->dev.platform_data;
|
|
|
|
if (p == NULL) {
|
|
|
|
dev_err(&dev->dev, "no platform data supplied\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
dev_id = dev->id;
|
|
|
|
}
|
|
|
|
|
|
|
|
sp = &sci_ports[dev_id];
|
2011-01-19 08:19:35 +00:00
|
|
|
platform_set_drvdata(dev, sp);
|
2009-01-21 15:13:42 +00:00
|
|
|
|
2013-12-06 09:59:54 +00:00
|
|
|
ret = sci_probe_single(dev, dev_id, p, sp);
|
2011-01-19 08:19:35 +00:00
|
|
|
if (ret)
|
2012-06-12 22:28:23 +00:00
|
|
|
return ret;
|
2009-01-21 15:13:42 +00:00
|
|
|
|
2017-02-03 10:38:19 +00:00
|
|
|
if (sp->port.fifosize > 1) {
|
|
|
|
ret = sysfs_create_file(&dev->dev.kobj,
|
|
|
|
&dev_attr_rx_fifo_trigger.attr);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2017-09-29 13:08:53 +00:00
|
|
|
if (sp->port.type == PORT_SCIFA || sp->port.type == PORT_SCIFB ||
|
|
|
|
sp->port.type == PORT_HSCIF) {
|
2017-02-03 10:38:19 +00:00
|
|
|
ret = sysfs_create_file(&dev->dev.kobj,
|
|
|
|
&dev_attr_rx_fifo_timeout.attr);
|
|
|
|
if (ret) {
|
|
|
|
if (sp->port.fifosize > 1) {
|
|
|
|
sysfs_remove_file(&dev->dev.kobj,
|
|
|
|
&dev_attr_rx_fifo_trigger.attr);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#ifdef CONFIG_SH_STANDARD_BIOS
|
|
|
|
sh_bios_gdb_detach();
|
|
|
|
#endif
|
|
|
|
|
2006-09-27 07:32:13 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2015-01-16 21:56:02 +00:00
|
|
|
static __maybe_unused int sci_suspend(struct device *dev)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2011-01-19 08:19:35 +00:00
|
|
|
struct sci_port *sport = dev_get_drvdata(dev);
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2011-01-19 08:19:35 +00:00
|
|
|
if (sport)
|
|
|
|
uart_suspend_port(&sci_uart_driver, &sport->port);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-09-27 07:32:13 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-01-16 21:56:02 +00:00
|
|
|
static __maybe_unused int sci_resume(struct device *dev)
|
2006-09-27 07:32:13 +00:00
|
|
|
{
|
2011-01-19 08:19:35 +00:00
|
|
|
struct sci_port *sport = dev_get_drvdata(dev);
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2011-01-19 08:19:35 +00:00
|
|
|
if (sport)
|
|
|
|
uart_resume_port(&sci_uart_driver, &sport->port);
|
2006-09-27 07:32:13 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-01-16 21:56:02 +00:00
|
|
|
static SIMPLE_DEV_PM_OPS(sci_dev_pm_ops, sci_suspend, sci_resume);
|
2009-06-14 22:07:38 +00:00
|
|
|
|
2006-09-27 07:32:13 +00:00
|
|
|
static struct platform_driver sci_driver = {
|
|
|
|
.probe = sci_probe,
|
2009-11-24 21:07:32 +00:00
|
|
|
.remove = sci_remove,
|
2006-09-27 07:32:13 +00:00
|
|
|
.driver = {
|
|
|
|
.name = "sh-sci",
|
2009-06-14 22:07:38 +00:00
|
|
|
.pm = &sci_dev_pm_ops,
|
2013-12-06 09:59:54 +00:00
|
|
|
.of_match_table = of_match_ptr(of_sci_match),
|
2006-09-27 07:32:13 +00:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
static int __init sci_init(void)
|
|
|
|
{
|
2014-03-11 10:11:17 +00:00
|
|
|
pr_info("%s\n", banner);
|
2006-09-27 07:32:13 +00:00
|
|
|
|
2017-04-20 12:13:01 +00:00
|
|
|
return platform_driver_register(&sci_driver);
|
2006-09-27 07:32:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit sci_exit(void)
|
|
|
|
{
|
|
|
|
platform_driver_unregister(&sci_driver);
|
2017-04-20 12:13:01 +00:00
|
|
|
|
|
|
|
if (sci_uart_driver.state)
|
|
|
|
uart_unregister_driver(&sci_uart_driver);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2009-12-14 10:24:42 +00:00
|
|
|
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
|
|
|
|
early_platform_init_buffer("earlyprintk", &sci_driver,
|
|
|
|
early_serial_buf, ARRAY_SIZE(early_serial_buf));
|
|
|
|
#endif
|
2015-12-24 10:24:48 +00:00
|
|
|
#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
|
2017-10-10 01:26:22 +00:00
|
|
|
static struct plat_sci_port port_cfg __initdata;
|
2015-12-24 10:24:48 +00:00
|
|
|
|
|
|
|
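/*
 * Common earlycon setup: reuse the earlycon-provided uart_port as
 * sci_ports[0] so that serial_console_write() and the register accessors
 * work before the driver has probed, and force the transmitter and
 * receiver on.
 */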
static int __init early_console_setup(struct earlycon_device *device,
|
|
|
|
int type)
|
|
|
|
{
|
|
|
|
if (!device->port.membase)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
device->port.serial_in = sci_serial_in;
|
|
|
|
device->port.serial_out = sci_serial_out;
|
|
|
|
device->port.type = type;
|
|
|
|
memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
|
2017-01-11 14:43:35 +00:00
|
|
|
port_cfg.type = type;
|
2015-12-24 10:24:48 +00:00
|
|
|
sci_ports[0].cfg = &port_cfg;
|
2017-01-11 14:43:35 +00:00
|
|
|
sci_ports[0].params = sci_probe_regmap(&port_cfg);
|
2017-01-11 14:43:23 +00:00
|
|
|
port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR);
|
|
|
|
sci_serial_out(&sci_ports[0].port, SCSCR,
|
|
|
|
SCSCR_RE | SCSCR_TE | port_cfg.scscr);
|
2015-12-24 10:24:48 +00:00
|
|
|
|
|
|
|
device->con->write = serial_console_write;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
static int __init sci_early_console_setup(struct earlycon_device *device,
|
|
|
|
const char *opt)
|
|
|
|
{
|
|
|
|
return early_console_setup(device, PORT_SCI);
|
|
|
|
}
|
|
|
|
static int __init scif_early_console_setup(struct earlycon_device *device,
|
|
|
|
const char *opt)
|
|
|
|
{
|
|
|
|
return early_console_setup(device, PORT_SCIF);
|
|
|
|
}
|
|
|
|
static int __init scifa_early_console_setup(struct earlycon_device *device,
|
|
|
|
const char *opt)
|
|
|
|
{
|
|
|
|
return early_console_setup(device, PORT_SCIFA);
|
|
|
|
}
|
|
|
|
static int __init scifb_early_console_setup(struct earlycon_device *device,
|
|
|
|
const char *opt)
|
|
|
|
{
|
|
|
|
return early_console_setup(device, PORT_SCIFB);
|
|
|
|
}
|
|
|
|
static int __init hscif_early_console_setup(struct earlycon_device *device,
|
|
|
|
const char *opt)
|
|
|
|
{
|
|
|
|
return early_console_setup(device, PORT_HSCIF);
|
|
|
|
}
|
|
|
|
|
|
|
|
OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
|
|
|
|
OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup);
|
|
|
|
OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup);
|
|
|
|
OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup);
|
|
|
|
OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup);
|
|
|
|
#endif /* CONFIG_SERIAL_SH_SCI_EARLYCON */
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
module_init(sci_init);
|
|
|
|
module_exit(sci_exit);
|
|
|
|
|
2006-09-27 07:32:13 +00:00
|
|
|
MODULE_LICENSE("GPL");
|
2008-04-15 21:34:35 +00:00
|
|
|
MODULE_ALIAS("platform:sh-sci");
|
2011-06-28 04:47:40 +00:00
|
|
|
MODULE_AUTHOR("Paul Mundt");
|
2013-05-31 15:57:01 +00:00
|
|
|
MODULE_DESCRIPTION("SuperH (H)SCI(F) serial driver");
|