// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/bcm.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/net_namespace.h>

/*
 * To send multiple CAN frame content within TX_SETUP or to filter
 * CAN messages with multiplex index within RX_SETUP, the number of
 * different filters is limited to 256 due to the one byte index value.
 */
#define MAX_NFRAMES 256

/* limit timers to 400 days for sending/timeouts */
#define BCM_TIMER_SEC_MAX	(400 * 24 * 60 * 60)

/* use of last_frames[index].flags */
#define RX_RECV    0x40 /* received data for this element */
#define RX_THR     0x80 /* element not been sent due to throttle feature */
#define BCM_CAN_FLAGS_MASK 0x3F /* to clean private flags after usage */

/* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
		     (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
		     (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))

MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-proto-2");

#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)

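/*
 * Usage sketch (illustrative only; interface name and values are
 * assumptions, not taken from this file): a cyclic transmission is
 * typically requested from userspace by writing a struct bcm_msg_head
 * followed by the CAN frame(s) to a connected CAN_BCM socket, e.g.:
 *
 *	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *	struct sockaddr_can addr = {
 *		.can_family  = AF_CAN,
 *		.can_ifindex = if_nametoindex("can0"),
 *	};
 *	connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *	struct {
 *		struct bcm_msg_head head;
 *		struct can_frame frame;
 *	} msg = {
 *		.head = {
 *			.opcode  = TX_SETUP,
 *			.flags   = SETTIMER | STARTTIMER,
 *			.ival2   = { .tv_usec = 100000 },
 *			.can_id  = 0x123,
 *			.nframes = 1,
 *		},
 *		.frame = { .can_id = 0x123, .len = 2, .data = { 0x11, 0x22 } },
 *	};
 *	write(s, &msg, sizeof(msg));
 *
 * With count == 0 and only ival2 set, the frame is then sent every
 * 100 ms until the op is removed (TX_DELETE) or the socket is closed.
 */
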
/*
 * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
 * 64 bit aligned so the offset has to be multiples of 8 which is ensured
 * by the only callers in bcm_rx_cmp_to_index() bcm_rx_handler().
 */
static inline u64 get_u64(const struct canfd_frame *cp, int offset)
{
	return *(u64 *)(cp->data + offset);
}

struct bcm_op {
	struct list_head list;
	int ifindex;
	canid_t can_id;
	u32 flags;
	unsigned long frames_abs, frames_filtered;
	struct bcm_timeval ival1, ival2;
	struct hrtimer timer, thrtimer;
	ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
	int rx_ifindex;
	int cfsiz;
	u32 count;
	u32 nframes;
	u32 currframe;
	/* void pointers to arrays of struct can[fd]_frame */
	void *frames;
	void *last_frames;
	struct canfd_frame sframe;
	struct canfd_frame last_sframe;
	struct sock *sk;
	struct net_device *rx_reg_dev;
};

struct bcm_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct list_head notifier;
	struct list_head rx_ops;
	struct list_head tx_ops;
	unsigned long dropped_usr_msgs;
	struct proc_dir_entry *bcm_proc_read;
	char procname [32]; /* inode number in decimal with \0 */
};

static LIST_HEAD(bcm_notifier_list);
static DEFINE_SPINLOCK(bcm_notifier_lock);
static struct bcm_sock *bcm_busy_notifier;

static inline struct bcm_sock *bcm_sk(const struct sock *sk)
{
	return (struct bcm_sock *)sk;
}

static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
{
	return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}

/* check limitations for timeval provided by user */
static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
{
	if ((msg_head->ival1.tv_sec < 0) ||
	    (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival1.tv_usec < 0) ||
	    (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
	    (msg_head->ival2.tv_sec < 0) ||
	    (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
	    (msg_head->ival2.tv_usec < 0) ||
	    (msg_head->ival2.tv_usec >= USEC_PER_SEC))
		return true;

	return false;
}

#define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)

/*
 * procfs functions
 */
#if IS_ENABLED(CONFIG_PROC_FS)
static char *bcm_proc_getifname(struct net *net, char *result, int ifindex)
{
	struct net_device *dev;

	if (!ifindex)
		return "any";

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		strcpy(result, dev->name);
	else
		strcpy(result, "???");
	rcu_read_unlock();

	return result;
}

static int bcm_proc_show(struct seq_file *m, void *v)
{
	char ifname[IFNAMSIZ];
	struct net *net = m->private;
	struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode);
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;

	seq_printf(m, ">>> socket %pK", sk->sk_socket);
	seq_printf(m, " / sk %pK", sk);
	seq_printf(m, " / bo %pK", bo);
	seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
	seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex));
	seq_printf(m, " <<<\n");

	list_for_each_entry(op, &bo->rx_ops, list) {

		unsigned long reduction;

		/* print only active entries & prevent division by zero */
		if (!op->frames_abs)
			continue;

		seq_printf(m, "rx_op: %03X %-5s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u)", op->nframes);
		else
			seq_printf(m, "[%u]", op->nframes);

		seq_printf(m, "%c ", (op->flags & RX_CHECK_DLC) ? 'd' : ' ');

		if (op->kt_ival1)
			seq_printf(m, "timeo=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "thr=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# recv %ld (%ld) => reduction: ",
			   op->frames_filtered, op->frames_abs);

		reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;

		seq_printf(m, "%s%ld%%\n",
			   (reduction == 100) ? "near " : "", reduction);
	}

	list_for_each_entry(op, &bo->tx_ops, list) {

		seq_printf(m, "tx_op: %03X %s ", op->can_id,
			   bcm_proc_getifname(net, ifname, op->ifindex));

		if (op->flags & CAN_FD_FRAME)
			seq_printf(m, "(%u) ", op->nframes);
		else
			seq_printf(m, "[%u] ", op->nframes);

		if (op->kt_ival1)
			seq_printf(m, "t1=%lld ",
				   (long long)ktime_to_us(op->kt_ival1));

		if (op->kt_ival2)
			seq_printf(m, "t2=%lld ",
				   (long long)ktime_to_us(op->kt_ival2));

		seq_printf(m, "# sent %ld\n", op->frames_abs);
	}
	seq_putc(m, '\n');
	return 0;
}
#endif /* CONFIG_PROC_FS */

/*
 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
 *              of the given bcm tx op
 */
static void bcm_can_tx(struct bcm_op *op)
{
	struct sk_buff *skb;
	struct net_device *dev;
	struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe;

	/* no target device? => exit */
	if (!op->ifindex)
		return;

	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
	if (!dev) {
		/* RFC: should this bcm_op remove itself here? */
		return;
	}

	skb = alloc_skb(op->cfsiz + sizeof(struct can_skb_priv), gfp_any());
	if (!skb)
		goto out;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	skb_put_data(skb, cf, op->cfsiz);

	/* send with loopback */
	skb->dev = dev;
	can_skb_set_owner(skb, op->sk);
	can_send(skb, 1);

	/* update statistics */
	op->currframe++;
	op->frames_abs++;

	/* reached last frame? */
	if (op->currframe >= op->nframes)
		op->currframe = 0;
out:
	dev_put(dev);
}

/*
 * bcm_send_to_user - send a BCM message to the userspace
 *                    (consisting of bcm_msg_head + x CAN frames)
 */
static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
			     struct canfd_frame *frames, int has_timestamp)
{
	struct sk_buff *skb;
	struct canfd_frame *firstframe;
	struct sockaddr_can *addr;
	struct sock *sk = op->sk;
	unsigned int datalen = head->nframes * op->cfsiz;
	int err;

	skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
	if (!skb)
		return;

	skb_put_data(skb, head, sizeof(*head));

	if (head->nframes) {
		/* CAN frames starting here */
		firstframe = (struct canfd_frame *)skb_tail_pointer(skb);

		skb_put_data(skb, frames, datalen);

		/*
		 * the BCM uses the flags-element of the canfd_frame
		 * structure for internal purposes. This is only
		 * relevant for updates that are generated by the
		 * BCM, where nframes is 1
		 */
		if (head->nframes == 1)
			firstframe->flags &= BCM_CAN_FLAGS_MASK;
	}

	if (has_timestamp) {
		/* restore rx timestamp */
		skb->tstamp = op->rx_stamp;
	}

	/*
	 * Put the datagram to the queue so that bcm_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = op->rx_ifindex;

	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		struct bcm_sock *bo = bcm_sk(sk);

		kfree_skb(skb);
		/* don't care about overflows in this statistic */
		bo->dropped_usr_msgs++;
	}
}

static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
{
	ktime_t ival;

	if (op->kt_ival1 && op->count)
		ival = op->kt_ival1;
	else if (op->kt_ival2)
		ival = op->kt_ival2;
	else
		return false;

	hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
	return true;
}

static void bcm_tx_start_timer(struct bcm_op *op)
{
	if (bcm_tx_set_expiry(op, &op->timer))
		hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
}

/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	if (op->kt_ival1 && (op->count > 0)) {
		op->count--;
		if (!op->count && (op->flags & TX_COUNTEVT)) {

			/* create notification to user */
			memset(&msg_head, 0, sizeof(msg_head));
			msg_head.opcode  = TX_EXPIRED;
			msg_head.flags   = op->flags;
			msg_head.count   = op->count;
			msg_head.ival1   = op->ival1;
			msg_head.ival2   = op->ival2;
			msg_head.can_id  = op->can_id;
			msg_head.nframes = 0;

			bcm_send_to_user(op, &msg_head, NULL, 0);
		}
		bcm_can_tx(op);

	} else if (op->kt_ival2) {
		bcm_can_tx(op);
	}

	return bcm_tx_set_expiry(op, &op->timer) ?
		HRTIMER_RESTART : HRTIMER_NORESTART;
}

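/*
 * Timing sketch for the tx path above (informational): with SETTIMER,
 * STARTTIMER, count > 0 and both intervals set, bcm_tx_start_timer() and
 * bcm_tx_timeout_handler() send 'count' frames spaced by ival1 and then
 * continue indefinitely spaced by ival2. If ival2 is zero, transmission
 * stops once the counter is exhausted; TX_COUNTEVT additionally reports
 * the expired counter to userspace via a TX_EXPIRED message.
 */
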
/*
 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
 */
static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
{
	struct bcm_msg_head head;

	/* update statistics */
	op->frames_filtered++;

	/* prevent statistics overflow */
	if (op->frames_filtered > ULONG_MAX/100)
		op->frames_filtered = op->frames_abs = 0;

	/* this element is not throttled anymore */
	data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);

	memset(&head, 0, sizeof(head));
	head.opcode  = RX_CHANGED;
	head.flags   = op->flags;
	head.count   = op->count;
	head.ival1   = op->ival1;
	head.ival2   = op->ival2;
	head.can_id  = op->can_id;
	head.nframes = 1;

	bcm_send_to_user(op, &head, data, 1);
}

/*
 * bcm_rx_update_and_send - process a detected relevant receive content change
 *                          1. update the last received data
 *                          2. send a notification to the user (if possible)
 */
static void bcm_rx_update_and_send(struct bcm_op *op,
				   struct canfd_frame *lastdata,
				   const struct canfd_frame *rxdata)
{
	memcpy(lastdata, rxdata, op->cfsiz);

	/* mark as used and throttled by default */
	lastdata->flags |= (RX_RECV|RX_THR);

	/* throttling mode inactive ? */
	if (!op->kt_ival2) {
		/* send RX_CHANGED to the user immediately */
		bcm_rx_changed(op, lastdata);
		return;
	}

	/* with active throttling timer we are just done here */
	if (hrtimer_active(&op->thrtimer))
		return;

	/* first reception with enabled throttling mode */
	if (!op->kt_lastmsg)
		goto rx_changed_settime;

	/* got a second frame inside a potential throttle period? */
	if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	    ktime_to_us(op->kt_ival2)) {
		/* do not send the saved data - only start throttle timer */
		hrtimer_start(&op->thrtimer,
			      ktime_add(op->kt_lastmsg, op->kt_ival2),
			      HRTIMER_MODE_ABS_SOFT);
		return;
	}

	/* the gap was that big, that throttling was not needed here */
rx_changed_settime:
	bcm_rx_changed(op, lastdata);
	op->kt_lastmsg = ktime_get();
}

/*
 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
 *                       received data stored in op->last_frames[]
 */
static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
				const struct canfd_frame *rxdata)
{
	struct canfd_frame *cf = op->frames + op->cfsiz * index;
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
	int i;

	/*
	 * no one uses the MSBs of flags for comparison,
	 * so we use it here to detect the first time of reception
	 */

	if (!(lcf->flags & RX_RECV)) {
		/* received data for the first time => send update to user */
		bcm_rx_update_and_send(op, lcf, rxdata);
		return;
	}

	/* do a real check in CAN frame data section */
	for (i = 0; i < rxdata->len; i += 8) {
		if ((get_u64(cf, i) & get_u64(rxdata, i)) !=
		    (get_u64(cf, i) & get_u64(lcf, i))) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}

	if (op->flags & RX_CHECK_DLC) {
		/* do a real check in CAN frame length */
		if (rxdata->len != lcf->len) {
			bcm_rx_update_and_send(op, lcf, rxdata);
			return;
		}
	}
}

/*
 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
 */
static void bcm_rx_starttimer(struct bcm_op *op)
{
	if (op->flags & RX_NO_AUTOTIMER)
		return;

	if (op->kt_ival1)
		hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
}

/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
	struct bcm_msg_head msg_head;

	/* if user wants to be informed, when cyclic CAN-Messages come back */
	if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
		/* clear received CAN frames to indicate 'nothing received' */
		memset(op->last_frames, 0, op->nframes * op->cfsiz);
	}

	/* create notification to user */
	memset(&msg_head, 0, sizeof(msg_head));
	msg_head.opcode  = RX_TIMEOUT;
	msg_head.flags   = op->flags;
	msg_head.count   = op->count;
	msg_head.ival1   = op->ival1;
	msg_head.ival2   = op->ival2;
	msg_head.can_id  = op->can_id;
	msg_head.nframes = 0;

	bcm_send_to_user(op, &msg_head, NULL, 0);

	return HRTIMER_NORESTART;
}

/*
 * bcm_rx_do_flush - helper for bcm_rx_thr_flush
 */
static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
{
	struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;

	if ((op->last_frames) && (lcf->flags & RX_THR)) {
		bcm_rx_changed(op, lcf);
		return 1;
	}
	return 0;
}

/*
 * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
 */
static int bcm_rx_thr_flush(struct bcm_op *op)
{
	int updated = 0;

	if (op->nframes > 1) {
		unsigned int i;

		/* for MUX filter we start at index 1 */
		for (i = 1; i < op->nframes; i++)
			updated += bcm_rx_do_flush(op, i);

	} else {
		/* for RX_FILTER_ID and simple filter */
		updated += bcm_rx_do_flush(op, 0);
	}

	return updated;
}

/*
 * bcm_rx_thr_handler - the time for blocked content updates is over now:
 *                      Check for throttled data and send it to the userspace
 */
static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
	struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);

	if (bcm_rx_thr_flush(op)) {
		hrtimer_forward_now(hrtimer, op->kt_ival2);
		return HRTIMER_RESTART;
	} else {
		/* rearm throttle handling */
		op->kt_lastmsg = 0;
		return HRTIMER_NORESTART;
	}
}

/*
 * bcm_rx_handler - handle a CAN frame reception
 */
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
	struct bcm_op *op = (struct bcm_op *)data;
	const struct canfd_frame *rxframe = (struct canfd_frame *)skb->data;
	unsigned int i;

	if (op->can_id != rxframe->can_id)
		return;

	/* make sure to handle the correct frame type (CAN / CAN FD) */
	if (skb->len != op->cfsiz)
		return;

	/* disable timeout */
	hrtimer_cancel(&op->timer);

	/* save rx timestamp */
	op->rx_stamp = skb->tstamp;
	/* save originator for recvfrom() */
	op->rx_ifindex = skb->dev->ifindex;
	/* update statistics */
	op->frames_abs++;

	if (op->flags & RX_RTR_FRAME) {
		/* send reply for RTR-request (placed in op->frames[0]) */
		bcm_can_tx(op);
		return;
	}

	if (op->flags & RX_FILTER_ID) {
		/* the easiest case */
		bcm_rx_update_and_send(op, op->last_frames, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes == 1) {
		/* simple compare with index 0 */
		bcm_rx_cmp_to_index(op, 0, rxframe);
		goto rx_starttimer;
	}

	if (op->nframes > 1) {
		/*
		 * multiplex compare
		 *
		 * find the first multiplex mask that fits.
		 * Remark: The MUX-mask is stored in index 0 - but only the
		 * first 64 bits of the frame data[] are relevant (CAN FD)
		 */

		for (i = 1; i < op->nframes; i++) {
			if ((get_u64(op->frames, 0) & get_u64(rxframe, 0)) ==
			    (get_u64(op->frames, 0) &
			     get_u64(op->frames + op->cfsiz * i, 0))) {
				bcm_rx_cmp_to_index(op, i, rxframe);
				break;
			}
		}
	}

rx_starttimer:
	bcm_rx_starttimer(op);
}

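/*
 * Multiplex example for bcm_rx_handler() (informational, values made up):
 * with op->frames[0].data[0] = 0xFF as MUX mask and op->frames[1].data[0]
 * = 0x01, op->frames[2].data[0] = 0x02 as filter frames, a received frame
 * with data[0] == 0x01 is compared against op->last_frames[1] by
 * bcm_rx_cmp_to_index(), one with data[0] == 0x02 against
 * op->last_frames[2], and any other first byte matches no filter at all.
 */
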
/*
 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
 */
static struct bcm_op *bcm_find_op(struct list_head *ops,
				  struct bcm_msg_head *mh, int ifindex)
{
	struct bcm_op *op;

	list_for_each_entry(op, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME))
			return op;
	}

	return NULL;
}

static void bcm_remove_op(struct bcm_op *op)
{
	hrtimer_cancel(&op->timer);
	hrtimer_cancel(&op->thrtimer);

	if ((op->frames) && (op->frames != &op->sframe))
		kfree(op->frames);

	if ((op->last_frames) && (op->last_frames != &op->last_sframe))
		kfree(op->last_frames);

	kfree(op);
}

static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
{
	if (op->rx_reg_dev == dev) {
		can_rx_unregister(dev_net(dev), dev, op->can_id,
				  REGMASK(op->can_id), bcm_rx_handler, op);

		/* mark as removed subscription */
		op->rx_reg_dev = NULL;
	} else
		printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
		       "mismatch %p %p\n", op->rx_reg_dev, dev);
}

/*
 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
 */
static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {

			/*
			 * Don't care if we're bound or not (due to netdev
			 * problems) can_rx_unregister() is always a safe
			 * thing to do here.
			 */
			if (op->ifindex) {
				/*
				 * Only remove subscriptions that had not
				 * been removed due to NETDEV_UNREGISTER
				 * in bcm_notifier()
				 */
				if (op->rx_reg_dev) {
					struct net_device *dev;

					dev = dev_get_by_index(sock_net(op->sk),
							       op->ifindex);
					if (dev) {
						bcm_rx_unreg(dev, op);
						dev_put(dev);
					}
				}
			} else
				can_rx_unregister(sock_net(op->sk), NULL,
						  op->can_id,
						  REGMASK(op->can_id),
						  bcm_rx_handler, op);

			list_del(&op->list);
			synchronize_rcu();
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
 */
static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh,
			    int ifindex)
{
	struct bcm_op *op, *n;

	list_for_each_entry_safe(op, n, ops, list) {
		if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) &&
		    (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) {
			list_del(&op->list);
			bcm_remove_op(op);
			return 1; /* done */
		}
	}

	return 0; /* not found */
}

/*
 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
 */
static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
		       int ifindex)
{
	struct bcm_op *op = bcm_find_op(ops, msg_head, ifindex);

	if (!op)
		return -EINVAL;

	/* put current values into msg_head */
	msg_head->flags   = op->flags;
	msg_head->count   = op->count;
	msg_head->ival1   = op->ival1;
	msg_head->ival2   = op->ival2;
	msg_head->nframes = op->nframes;

	bcm_send_to_user(op, msg_head, op->frames, 0);

	return MHSIZ;
}

/*
 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
 */
static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	struct canfd_frame *cf;
	unsigned int i;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	/* check nframes boundaries - we need at least one CAN frame */
	if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		/* update CAN frames content */
		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0)
				return err;

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}
		op->flags = msg_head->flags;

	} else {
		/* insert new BCM operation for the given can_id */

		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		/* create array for CAN frames and copy the data */
		if (msg_head->nframes > 1) {
|
|
|
op->frames = kmalloc_array(msg_head->nframes,
|
|
|
|
op->cfsiz,
|
|
|
|
GFP_KERNEL);
|
2007-11-16 23:53:52 +00:00
|
|
|
if (!op->frames) {
|
|
|
|
kfree(op);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
op->frames = &op->sframe;
|
|
|
|
|
|
|
|
		for (i = 0; i < msg_head->nframes; i++) {

			cf = op->frames + op->cfsiz * i;
			err = memcpy_from_msg((u8 *)cf, msg, op->cfsiz);

			if (op->flags & CAN_FD_FRAME) {
				if (cf->len > 64)
					err = -EINVAL;
			} else {
				if (cf->len > 8)
					err = -EINVAL;
			}

			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				kfree(op);
				return err;
			}

			if (msg_head->flags & TX_CP_CAN_ID) {
				/* copy can_id into frame */
				cf->can_id = msg_head->can_id;
			}
		}

		/* tx_ops never compare with previous received messages */
		op->last_frames = NULL;

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->timer.function = bcm_tx_timeout_handler;

		/* currently unused in tx_ops */
		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);

		/* add this bcm_op to the list of the tx_ops */
		list_add(&op->list, &bo->tx_ops);

	} /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */

	if (op->nframes != msg_head->nframes) {
		op->nframes = msg_head->nframes;

		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

	/* check flags */

	if (op->flags & TX_RESET_MULTI_IDX) {
		/* start multiple frame transmission with index 0 */
		op->currframe = 0;
	}

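	/*
	 * SETTIMER: ival1 together with count controls the initial
	 * transmission phase; once count is used up the cyclic transmission
	 * continues with ival2 (see bcm_tx_timeout_handler).
	 */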
	if (op->flags & SETTIMER) {
		/* set timer values */
		op->count = msg_head->count;
		op->ival1 = msg_head->ival1;
		op->ival2 = msg_head->ival2;
		op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
		op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

		/* disable an active timer due to zero values? */
		if (!op->kt_ival1 && !op->kt_ival2)
			hrtimer_cancel(&op->timer);
	}

	if (op->flags & STARTTIMER) {
		hrtimer_cancel(&op->timer);
		/* spec: send CAN frame when starting timer */
		op->flags |= TX_ANNOUNCE;
	}

	if (op->flags & TX_ANNOUNCE) {
		bcm_can_tx(op);
		if (op->count)
			op->count--;
	}

	if (op->flags & STARTTIMER)
		bcm_tx_start_timer(op);

	return msg_head->nframes * op->cfsiz + MHSIZ;
}

/*
 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
 */
static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
			int ifindex, struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);
	struct bcm_op *op;
	int do_rx_register;
	int err = 0;

	if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
		/* be robust against wrong usage ... */
		msg_head->flags |= RX_FILTER_ID;
		/* ignore trailing garbage */
		msg_head->nframes = 0;
	}

	/* the first element contains the mux-mask => MAX_NFRAMES + 1  */
	if (msg_head->nframes > MAX_NFRAMES + 1)
		return -EINVAL;

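	/*
	 * RX_RTR_FRAME requires exactly one frame and a can_id with the
	 * CAN_RTR_FLAG set.
	 */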
	if ((msg_head->flags & RX_RTR_FRAME) &&
	    ((msg_head->nframes != 1) ||
	     (!(msg_head->can_id & CAN_RTR_FLAG))))
		return -EINVAL;

	/* check timeval limitations */
	if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
		return -EINVAL;

	/* check the given can_id */
	op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
	if (op) {
		/* update existing BCM operation */

		/*
		 * Do we need more space for the CAN frames than currently
		 * allocated? -> This is a _really_ unusual use-case and
		 * therefore (complexity / locking) it is not supported.
		 */
		if (msg_head->nframes > op->nframes)
			return -E2BIG;

		if (msg_head->nframes) {
			/* update CAN frames content */
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0)
				return err;

			/* clear last_frames to indicate 'nothing received' */
			memset(op->last_frames, 0, msg_head->nframes * op->cfsiz);
		}

		op->nframes = msg_head->nframes;
		op->flags = msg_head->flags;

		/* Only an update -> do not call can_rx_register() */
		do_rx_register = 0;

	} else {
		/* insert new BCM operation for the given can_id */
		op = kzalloc(OPSIZ, GFP_KERNEL);
		if (!op)
			return -ENOMEM;

		op->can_id = msg_head->can_id;
		op->nframes = msg_head->nframes;
		op->cfsiz = CFSIZ(msg_head->flags);
		op->flags = msg_head->flags;

		if (msg_head->nframes > 1) {
			/* create array for CAN frames and copy the data */
			op->frames = kmalloc_array(msg_head->nframes,
						   op->cfsiz,
						   GFP_KERNEL);
			if (!op->frames) {
				kfree(op);
				return -ENOMEM;
			}

			/* create and init array for received CAN frames */
			op->last_frames = kcalloc(msg_head->nframes,
						  op->cfsiz,
						  GFP_KERNEL);
			if (!op->last_frames) {
				kfree(op->frames);
				kfree(op);
				return -ENOMEM;
			}

		} else {
			op->frames = &op->sframe;
			op->last_frames = &op->last_sframe;
		}

		if (msg_head->nframes) {
			err = memcpy_from_msg(op->frames, msg,
					      msg_head->nframes * op->cfsiz);
			if (err < 0) {
				if (op->frames != &op->sframe)
					kfree(op->frames);
				if (op->last_frames != &op->last_sframe)
					kfree(op->last_frames);
				kfree(op);
				return err;
			}
		}

		/* bcm_can_tx / bcm_tx_timeout_handler needs this */
		op->sk = sk;
		op->ifindex = ifindex;

		/* ifindex for timeout events w/o previous frame reception */
		op->rx_ifindex = ifindex;

		/* initialize uninitialized (kzalloc) structure */
		hrtimer_init(&op->timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->timer.function = bcm_rx_timeout_handler;

		hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_SOFT);
		op->thrtimer.function = bcm_rx_thr_handler;

		/* add this bcm_op to the list of the rx_ops */
		list_add(&op->list, &bo->rx_ops);

		/* call can_rx_register() */
		do_rx_register = 1;

	} /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */

	/* check flags */

	if (op->flags & RX_RTR_FRAME) {
		struct canfd_frame *frame0 = op->frames;

		/* no timers in RTR-mode */
		hrtimer_cancel(&op->thrtimer);
		hrtimer_cancel(&op->timer);

		/*
		 * funny feature in RX(!)_SETUP only for RTR-mode:
		 * copy can_id into frame BUT without RTR-flag to
		 * prevent a full-load-loopback-test ... ;-]
		 */
		if ((op->flags & TX_CP_CAN_ID) ||
		    (frame0->can_id == op->can_id))
			frame0->can_id = op->can_id & ~CAN_RTR_FLAG;

	} else {
		if (op->flags & SETTIMER) {

			/* set timer value */
			op->ival1 = msg_head->ival1;
			op->ival2 = msg_head->ival2;
			op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1);
			op->kt_ival2 = bcm_timeval_to_ktime(msg_head->ival2);

			/* disable an active timer due to zero value? */
			if (!op->kt_ival1)
				hrtimer_cancel(&op->timer);

			/*
			 * In any case cancel the throttle timer, flush
			 * potentially blocked msgs and reset throttle handling
			 */
			op->kt_lastmsg = 0;
			hrtimer_cancel(&op->thrtimer);
			bcm_rx_thr_flush(op);
		}

		if ((op->flags & STARTTIMER) && op->kt_ival1)
			hrtimer_start(&op->timer, op->kt_ival1,
				      HRTIMER_MODE_REL_SOFT);
	}

	/* now we can register for can_ids, if we added a new bcm_op */
	if (do_rx_register) {
		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (dev) {
				err = can_rx_register(sock_net(sk), dev,
						      op->can_id,
						      REGMASK(op->can_id),
						      bcm_rx_handler, op,
						      "bcm", sk);

				op->rx_reg_dev = dev;
				dev_put(dev);
			}

		} else
			err = can_rx_register(sock_net(sk), NULL, op->can_id,
					      REGMASK(op->can_id),
					      bcm_rx_handler, op, "bcm", sk);
		if (err) {
			/* this bcm rx op is broken -> remove it */
			list_del(&op->list);
			bcm_remove_op(op);
			return err;
		}
	}

	return msg_head->nframes * op->cfsiz + MHSIZ;
}

/*
 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
 */
static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
		       int cfsiz)
{
	struct sk_buff *skb;
	struct net_device *dev;
	int err;

	/* we need a real device to send frames */
	if (!ifindex)
		return -ENODEV;

	skb = alloc_skb(cfsiz + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);

	err = memcpy_from_msg(skb_put(skb, cfsiz), msg, cfsiz);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb->dev = dev;
	can_skb_set_owner(skb, sk);
	err = can_send(skb, 1); /* send with loopback */
	dev_put(dev);

	if (err)
		return err;

	return cfsiz + MHSIZ;
}

/*
 * bcm_sendmsg - process BCM commands (opcodes) from the userspace
 */
static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
	struct bcm_msg_head msg_head;
	int cfsiz;
	int ret; /* read bytes or error codes as return value */

	if (!bo->bound)
		return -ENOTCONN;

	/* check for valid message length from userspace */
	if (size < MHSIZ)
		return -EINVAL;

	/* read message head information */
	ret = memcpy_from_msg((u8 *)&msg_head, msg, MHSIZ);
	if (ret < 0)
		return ret;

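	/* the remaining payload must be a whole number of CAN (FD) frames */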
	cfsiz = CFSIZ(msg_head.flags);
	if ((size - MHSIZ) % cfsiz)
		return -EINVAL;

	/* check for alternative ifindex for this bcm_op */

	if (!ifindex && msg->msg_name) {
		/* no bound device as default => check msg_name */
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < BCM_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		/* ifindex from sendto() */
		ifindex = addr->can_ifindex;

		if (ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ifindex);
			if (!dev)
				return -ENODEV;

			if (dev->type != ARPHRD_CAN) {
				dev_put(dev);
				return -ENODEV;
			}

			dev_put(dev);
		}
	}

	lock_sock(sk);

	switch (msg_head.opcode) {

	case TX_SETUP:
		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
		break;

	case RX_SETUP:
		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
		break;

	case TX_DELETE:
		if (bcm_delete_tx_op(&bo->tx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case RX_DELETE:
		if (bcm_delete_rx_op(&bo->rx_ops, &msg_head, ifindex))
			ret = MHSIZ;
		else
			ret = -EINVAL;
		break;

	case TX_READ:
		/* reuse msg_head for the reply to TX_READ */
		msg_head.opcode  = TX_STATUS;
		ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
		break;

	case RX_READ:
		/* reuse msg_head for the reply to RX_READ */
		msg_head.opcode  = RX_STATUS;
		ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
		break;

	case TX_SEND:
		/* we need exactly one CAN frame behind the msg head */
		if ((msg_head.nframes != 1) || (size != cfsiz + MHSIZ))
			ret = -EINVAL;
		else
			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	release_sock(sk);

	return ret;
}

/*
 * notification handler for netdevice status changes
 */
static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &bo->sk;
	struct bcm_op *op;
	int notify_enodev = 0;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	switch (msg) {

	case NETDEV_UNREGISTER:
		lock_sock(sk);

		/* remove device specific receive entries */
		list_for_each_entry(op, &bo->rx_ops, list)
			if (op->rx_reg_dev == dev)
				bcm_rx_unreg(dev, op);

		/* remove device reference, if this is our bound device */
		if (bo->bound && bo->ifindex == dev->ifindex) {
			bo->bound   = 0;
			bo->ifindex = 0;
			notify_enodev = 1;
		}

		release_sock(sk);

		if (notify_enodev) {
			sk->sk_err = ENODEV;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
		break;

	case NETDEV_DOWN:
		if (bo->bound && bo->ifindex == dev->ifindex) {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk_error_report(sk);
		}
	}
}

static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

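	/*
	 * bcm_notify() takes lock_sock() and may sleep, so the spinlock is
	 * dropped around each notification; bcm_busy_notifier marks the
	 * socket that is currently being handled.
	 */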
	spin_lock(&bcm_notifier_lock);
	list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
		spin_unlock(&bcm_notifier_lock);
		bcm_notify(bcm_busy_notifier, msg, dev);
		spin_lock(&bcm_notifier_lock);
	}
	bcm_busy_notifier = NULL;
	spin_unlock(&bcm_notifier_lock);
	return NOTIFY_DONE;
}

/*
 * initial settings for all BCM sockets to be set at socket creation time
 */
static int bcm_init(struct sock *sk)
{
	struct bcm_sock *bo = bcm_sk(sk);

	bo->bound            = 0;
	bo->ifindex          = 0;
	bo->dropped_usr_msgs = 0;
	bo->bcm_proc_read    = NULL;

	INIT_LIST_HEAD(&bo->tx_ops);
	INIT_LIST_HEAD(&bo->rx_ops);

	/* set notifier */
	spin_lock(&bcm_notifier_lock);
	list_add_tail(&bo->notifier, &bcm_notifier_list);
	spin_unlock(&bcm_notifier_lock);

	return 0;
}

/*
 * standard socket functions
 */
static int bcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net;
	struct bcm_sock *bo;
	struct bcm_op *op, *next;

	if (!sk)
		return 0;

	net = sock_net(sk);
	bo = bcm_sk(sk);

	/* remove bcm_ops, timer, rx_unregister(), etc. */

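	/* wait for a possibly running bcm_notifier() to finish with this socket */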
	spin_lock(&bcm_notifier_lock);
	while (bcm_busy_notifier == bo) {
		spin_unlock(&bcm_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&bcm_notifier_lock);
	}
	list_del(&bo->notifier);
	spin_unlock(&bcm_notifier_lock);

	lock_sock(sk);

	list_for_each_entry_safe(op, next, &bo->tx_ops, list)
		bcm_remove_op(op);

	list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
		/*
		 * Don't care if we're bound or not (due to netdev problems)
		 * can_rx_unregister() is always a safe thing to do here.
		 */
		if (op->ifindex) {
			/*
			 * Only remove subscriptions that had not
			 * been removed due to NETDEV_UNREGISTER
			 * in bcm_notifier()
			 */
			if (op->rx_reg_dev) {
				struct net_device *dev;

				dev = dev_get_by_index(net, op->ifindex);
				if (dev) {
					bcm_rx_unreg(dev, op);
					dev_put(dev);
				}
			}
		} else
			can_rx_unregister(net, NULL, op->can_id,
					  REGMASK(op->can_id),
					  bcm_rx_handler, op);
	}

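	/*
	 * bcm_rx_handler() is called under RCU protection, so after
	 * can_rx_unregister() wait for all readers to finish before the
	 * rx ops are actually freed below.
	 */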
	synchronize_rcu();

	list_for_each_entry_safe(op, next, &bo->rx_ops, list)
		bcm_remove_op(op);

#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove procfs entry */
	if (net->can.bcmproc_dir && bo->bcm_proc_read)
		remove_proc_entry(bo->procname, net->can.bcmproc_dir);
#endif /* CONFIG_PROC_FS */

	/* remove device reference */
	if (bo->bound) {
		bo->bound   = 0;
		bo->ifindex = 0;
	}

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
		       int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct bcm_sock *bo = bcm_sk(sk);
	struct net *net = sock_net(sk);
	int ret = 0;

	if (len < BCM_MIN_NAMELEN)
		return -EINVAL;

	lock_sock(sk);

	if (bo->bound) {
		ret = -EISCONN;
		goto fail;
	}

	/* bind a device to this socket */
	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(net, addr->can_ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto fail;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			ret = -ENODEV;
			goto fail;
		}

		bo->ifindex = dev->ifindex;
		dev_put(dev);

	} else {
		/* no interface reference for ifindex = 0 ('any' CAN device) */
		bo->ifindex = 0;
	}

#if IS_ENABLED(CONFIG_PROC_FS)
	if (net->can.bcmproc_dir) {
		/* unique socket address as filename */
		sprintf(bo->procname, "%lu", sock_i_ino(sk));
		bo->bcm_proc_read = proc_create_net_single(bo->procname, 0644,
							   net->can.bcmproc_dir,
							   bcm_proc_show, sk);
		if (!bo->bcm_proc_read) {
			ret = -ENOMEM;
			goto fail;
		}
	}
#endif /* CONFIG_PROC_FS */

	bo->bound = 1;

fail:
	release_sock(sk);

	return ret;
}

static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int error = 0;
	int noblock;
	int err;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;
	skb = skb_recv_datagram(sk, flags, noblock, &error);
	if (!skb)
		return error;

	if (skb->len < size)
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

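	/* provide timestamp and (if enabled) SO_RXQ_OVFL drop count via cmsg */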
	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(BCM_MIN_NAMELEN);
		msg->msg_namelen = BCM_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return size;
}

static int bcm_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}

static const struct proto_ops bcm_ops = {
	.family        = PF_CAN,
	.release       = bcm_release,
	.bind          = sock_no_bind,
	.connect       = bcm_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = sock_no_getname,
	.poll          = datagram_poll,
	.ioctl         = bcm_sock_no_ioctlcmd,
	.gettstamp     = sock_gettstamp,
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.sendmsg       = bcm_sendmsg,
	.recvmsg       = bcm_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

static struct proto bcm_proto __read_mostly = {
	.name       = "CAN_BCM",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct bcm_sock),
	.init       = bcm_init,
};

static const struct can_proto bcm_can_proto = {
	.type       = SOCK_DGRAM,
	.protocol   = CAN_BCM,
	.ops        = &bcm_ops,
	.prot       = &bcm_proto,
};

static int canbcm_pernet_init(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* create /proc/net/can-bcm directory */
	net->can.bcmproc_dir = proc_net_mkdir(net, "can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void canbcm_pernet_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_PROC_FS)
	/* remove /proc/net/can-bcm directory */
	if (net->can.bcmproc_dir)
		remove_proc_entry("can-bcm", net->proc_net);
#endif /* CONFIG_PROC_FS */
}

static struct pernet_operations canbcm_pernet_ops __read_mostly = {
	.init = canbcm_pernet_init,
	.exit = canbcm_pernet_exit,
};

static struct notifier_block canbcm_notifier = {
	.notifier_call = bcm_notifier
};

static int __init bcm_module_init(void)
{
	int err;

	pr_info("can: broadcast manager protocol\n");

	err = can_proto_register(&bcm_can_proto);
	if (err < 0) {
		printk(KERN_ERR "can: registration of bcm protocol failed\n");
		return err;
	}

	register_pernet_subsys(&canbcm_pernet_ops);
	register_netdevice_notifier(&canbcm_notifier);
	return 0;
}

static void __exit bcm_module_exit(void)
{
	can_proto_unregister(&bcm_can_proto);
	unregister_netdevice_notifier(&canbcm_notifier);
	unregister_pernet_subsys(&canbcm_pernet_ops);
}

module_init(bcm_module_init);
module_exit(bcm_module_exit);