Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (31 commits)
  [NETFILTER]: xt_conntrack: add compat support
  [NETFILTER]: iptable_raw: ignore short packets sent by SOCK_RAW sockets
  [NETFILTER]: iptable_{filter,mangle}: more descriptive "happy cracking" message
  [NETFILTER]: nf_nat: Clears helper private area when NATing
  [NETFILTER]: ctnetlink: clear helper area and handle unchanged helper
  [NETFILTER]: nf_conntrack: Removes unused destroy operation of l3proto
  [NETFILTER]: nf_conntrack: Removes duplicated declarations
  [NETFILTER]: nf_nat: remove unused argument of function allocating binding
  [NETFILTER]: Clean up table initialization
  [NET_SCHED]: Avoid requeue warning on dev_deactivate
  [NET_SCHED]: Reread dev->qdisc for NETDEV_TX_OK
  [NET_SCHED]: Rationalise return value of qdisc_restart
  [NET]: Fix dev->qdisc race for NETDEV_TX_LOCKED case
  [UDP]: Fix AF-specific references in AF-agnostic code.
  [IrDA]: KingSun/DonShine USB IrDA dongle support.
  [IPV6] ROUTE: Assign rt6i_idev for ip6_{prohibit,blk_hole}_entry.
  [IPV6]: Do no rely on skb->dst before it is assigned.
  [IPV6]: Send ICMPv6 error on scope violations.
  [SCTP]: Do not include ABORT chunk header in the notification.
  [SCTP]: Correctly copy addresses in sctp_copy_laddrs
  ...
commit ee54d2d87a
@ -307,7 +307,9 @@ static void hci_uart_tty_close(struct tty_struct *tty)
	if (hu) {
		struct hci_dev *hdev = hu->hdev;
		hci_uart_close(hdev);

		if (hdev)
			hci_uart_close(hdev);

		if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
			hu->proto->close(hu);
@ -473,12 +475,18 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file * file,
			tty->low_latency = 1;
		} else
			return -EBUSY;
		break;

	case HCIUARTGETPROTO:
		if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
			return hu->proto->id;
		return -EUNATCH;

	case HCIUARTGETDEVICE:
		if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
			return hu->hdev->id;
		return -EUNATCH;

	default:
		err = n_tty_ioctl(tty, file, cmd, arg);
		break;
@ -28,8 +28,9 @@
#endif

/* Ioctls */
#define HCIUARTSETPROTO _IOW('U', 200, int)
#define HCIUARTGETPROTO _IOR('U', 201, int)
#define HCIUARTGETDEVICE _IOR('U', 202, int)

/* UART protocols */
#define HCI_UART_MAX_PROTO 4
@ -141,6 +141,20 @@ config ACT200L_DONGLE
	  To activate support for ACTiSYS IR-200L dongle you will have to
	  start irattach like this: "irattach -d act200l".

config KINGSUN_DONGLE
	tristate "KingSun/DonShine DS-620 IrDA-USB dongle"
	depends on IRDA && USB && EXPERIMENTAL
	help
	  Say Y or M here if you want to build support for the KingSun/DonShine
	  DS-620 IrDA-USB bridge device driver.

	  This USB bridge does not conform to the IrDA-USB device class
	  specification, and therefore needs its own specific driver. This
	  dongle supports SIR speed only (9600 bps).

	  To compile it as a module, choose M here: the module will be called
	  kingsun-sir.

comment "Old SIR device drivers"

config IRPORT_SIR
@ -45,6 +45,7 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o

# The SIR helper module
sir-dev-objs := sir_dev.o sir_dongle.o

drivers/net/irda/kingsun-sir.c (new file, 657 lines)
@ -0,0 +1,657 @@
|
||||
/*****************************************************************************
|
||||
*
|
||||
* Filename: kingsun-sir.c
|
||||
* Version: 0.1.1
|
||||
* Description: Irda KingSun/DonShine USB Dongle
|
||||
* Status: Experimental
|
||||
* Author: Alex Villacís Lasso <a_villacis@palosanto.com>
|
||||
*
|
||||
* Based on stir4200 and mcs7780 drivers, with (strange?) differences
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
/*
|
||||
* This is my current (2007-04-25) understanding of how this dongle is supposed
|
||||
* to work. This is based on reverse-engineering and examination of the packet
|
||||
* data sent and received by the WinXP driver using USBSnoopy. Feel free to
|
||||
* update here as more of this dongle is known:
|
||||
*
|
||||
* General: Unlike the other USB IrDA dongles, this particular dongle exposes,
|
||||
* not two bulk (in and out) endpoints, but two *interrupt* ones. This dongle,
|
||||
* like the bulk based ones (stir4200.c and mcs7780.c), requires polling in
|
||||
* order to receive data.
|
||||
* Transmission: Just like stir4200, this dongle uses a raw stream of data,
|
||||
* which needs to be wrapped and escaped in a similar way as in stir4200.c.
|
||||
* Reception: Poll-based, as in stir4200. Each read returns the contents of an
|
||||
* 8-byte buffer, of which the first byte (LSB) indicates the number of bytes
|
||||
* (1-7) of valid data contained within the remaining 7 bytes. For example, if
|
||||
* the buffer had the following contents:
|
||||
* 06 ff ff ff c0 01 04 aa
|
||||
* This means that (06) there are 6 bytes of valid data. The byte 0xaa at the
|
||||
* end is garbage (left over from a previous reception) and is discarded.
|
||||
* If a read returns an "impossible" value as the length of valid data (such as
|
||||
* 0x36) in the first byte, then the buffer is uninitialized (as is the case of
|
||||
* first plug-in) and its contents should be discarded. There is currently no
|
||||
* evidence that the top 5 bits of the 1st byte of the buffer can have values
|
||||
* other than 0 once reception begins.
|
||||
* Once valid bytes are collected, the assembled stream is a sequence of
|
||||
* wrapped IrDA frames that is unwrapped and unescaped as in stir4200.c.
|
||||
* BIG FAT WARNING: the dongle does *not* reset the RX buffer in any way after
|
||||
* a successful read from the host, which means that in absence of further
|
||||
* reception, repeated reads from the dongle will return the exact same
|
||||
* contents repeatedly. Attempts to be smart and cache a previous read seem
|
||||
* to result in corrupted packets, so this driver depends on the unwrap logic
|
||||
* to sort out any repeated reads.
|
||||
* Speed change: no commands observed so far to change speed, assumed fixed
|
||||
* 9600bps (SIR).
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/usb.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/crc32.h>
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#include <net/irda/irda.h>
|
||||
#include <net/irda/wrapper.h>
|
||||
#include <net/irda/crc.h>
|
||||
|
||||
/*
|
||||
* According to lsusb, 0x07c0 is assigned to
|
||||
* "Code Mercenaries Hard- und Software GmbH"
|
||||
*/
|
||||
#define KING_VENDOR_ID 0x07c0
|
||||
#define KING_PRODUCT_ID 0x4200
|
||||
|
||||
/* These are the currently known USB ids */
|
||||
static struct usb_device_id dongles[] = {
|
||||
/* KingSun Co,Ltd IrDA/USB Bridge */
|
||||
{ USB_DEVICE(KING_VENDOR_ID, KING_PRODUCT_ID) },
|
||||
{ }
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(usb, dongles);
|
||||
|
||||
#define KINGSUN_MTT 0x07
|
||||
|
||||
#define KINGSUN_FIFO_SIZE 4096
|
||||
#define KINGSUN_EP_IN 0
|
||||
#define KINGSUN_EP_OUT 1
|
||||
|
||||
struct kingsun_cb {
|
||||
struct usb_device *usbdev; /* init: probe_irda */
|
||||
struct net_device *netdev; /* network layer */
|
||||
struct irlap_cb *irlap; /* The link layer we are bound to */
|
||||
struct net_device_stats stats; /* network statistics */
|
||||
struct qos_info qos;
|
||||
|
||||
__u8 *in_buf; /* receive buffer */
|
||||
__u8 *out_buf; /* transmit buffer */
|
||||
__u8 max_rx; /* max. atomic read from dongle
|
||||
(usually 8), also size of in_buf */
|
||||
__u8 max_tx; /* max. atomic write to dongle
|
||||
(usually 8) */
|
||||
|
||||
iobuff_t rx_buff; /* receive unwrap state machine */
|
||||
struct timeval rx_time;
|
||||
spinlock_t lock;
|
||||
int receiving;
|
||||
|
||||
__u8 ep_in;
|
||||
__u8 ep_out;
|
||||
|
||||
struct urb *tx_urb;
|
||||
struct urb *rx_urb;
|
||||
};
|
||||
|
||||
/* Callback transmission routine */
|
||||
static void kingsun_send_irq(struct urb *urb)
|
||||
{
|
||||
struct kingsun_cb *kingsun = urb->context;
|
||||
struct net_device *netdev = kingsun->netdev;
|
||||
|
||||
/* in process of stopping, just drop data */
|
||||
if (!netif_running(kingsun->netdev)) {
|
||||
err("kingsun_send_irq: Network not running!");
|
||||
return;
|
||||
}
|
||||
|
||||
/* unlink, shutdown, unplug, other nasties */
|
||||
if (urb->status != 0) {
|
||||
err("kingsun_send_irq: urb asynchronously failed - %d",
|
||||
urb->status);
|
||||
}
|
||||
netif_wake_queue(netdev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from net/core when new frame is available.
|
||||
*/
|
||||
static int kingsun_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
|
||||
{
|
||||
struct kingsun_cb *kingsun;
|
||||
int wraplen;
|
||||
int ret = 0;
|
||||
|
||||
if (skb == NULL || netdev == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
netif_stop_queue(netdev);
|
||||
|
||||
/* the IRDA wrapping routines don't deal with non linear skb */
|
||||
SKB_LINEAR_ASSERT(skb);
|
||||
|
||||
kingsun = netdev_priv(netdev);
|
||||
|
||||
spin_lock(&kingsun->lock);
|
||||
|
||||
/* Append data to the end of whatever data remains to be transmitted */
|
||||
wraplen = async_wrap_skb(skb,
|
||||
kingsun->out_buf,
|
||||
KINGSUN_FIFO_SIZE);
|
||||
|
||||
/* Calculate how much data can be transmitted in this urb */
|
||||
usb_fill_int_urb(kingsun->tx_urb, kingsun->usbdev,
|
||||
usb_sndintpipe(kingsun->usbdev, kingsun->ep_out),
|
||||
kingsun->out_buf, wraplen, kingsun_send_irq,
|
||||
kingsun, 1);
|
||||
|
||||
if ((ret = usb_submit_urb(kingsun->tx_urb, GFP_ATOMIC))) {
|
||||
err("kingsun_hard_xmit: failed tx_urb submit: %d", ret);
|
||||
switch (ret) {
|
||||
case -ENODEV:
|
||||
case -EPIPE:
|
||||
break;
|
||||
default:
|
||||
kingsun->stats.tx_errors++;
|
||||
netif_start_queue(netdev);
|
||||
}
|
||||
} else {
|
||||
kingsun->stats.tx_packets++;
|
||||
kingsun->stats.tx_bytes += skb->len;
|
||||
}
|
||||
|
||||
dev_kfree_skb(skb);
|
||||
spin_unlock(&kingsun->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Receive callback function */
|
||||
static void kingsun_rcv_irq(struct urb *urb)
|
||||
{
|
||||
struct kingsun_cb *kingsun = urb->context;
|
||||
int ret;
|
||||
|
||||
/* in process of stopping, just drop data */
|
||||
if (!netif_running(kingsun->netdev)) {
|
||||
kingsun->receiving = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
/* unlink, shutdown, unplug, other nasties */
|
||||
if (urb->status != 0) {
|
||||
err("kingsun_rcv_irq: urb asynchronously failed - %d",
|
||||
urb->status);
|
||||
kingsun->receiving = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
if (urb->actual_length == kingsun->max_rx) {
|
||||
__u8 *bytes = urb->transfer_buffer;
|
||||
int i;
|
||||
|
||||
/* The very first byte in the buffer indicates the length of
|
||||
valid data in the read. This byte must be in the range
|
||||
1..kingsun->max_rx -1 . Values outside this range indicate
|
||||
an uninitialized Rx buffer when the dongle has just been
|
||||
plugged in. */
|
||||
if (bytes[0] >= 1 && bytes[0] < kingsun->max_rx) {
|
||||
for (i = 1; i <= bytes[0]; i++) {
|
||||
async_unwrap_char(kingsun->netdev,
|
||||
&kingsun->stats,
|
||||
&kingsun->rx_buff, bytes[i]);
|
||||
}
|
||||
kingsun->netdev->last_rx = jiffies;
|
||||
do_gettimeofday(&kingsun->rx_time);
|
||||
kingsun->receiving =
|
||||
(kingsun->rx_buff.state != OUTSIDE_FRAME)
|
||||
? 1 : 0;
|
||||
}
|
||||
} else if (urb->actual_length > 0) {
|
||||
err("%s(): Unexpected response length, expected %d got %d",
|
||||
__FUNCTION__, kingsun->max_rx, urb->actual_length);
|
||||
}
|
||||
/* This urb has already been filled in kingsun_net_open */
|
||||
ret = usb_submit_urb(urb, GFP_ATOMIC);
|
||||
}
|
||||
|
||||
/*
|
||||
* Function kingsun_net_open (dev)
|
||||
*
|
||||
* Network device is taken up. Usually this is done by "ifconfig irda0 up"
|
||||
*/
|
||||
static int kingsun_net_open(struct net_device *netdev)
|
||||
{
|
||||
struct kingsun_cb *kingsun = netdev_priv(netdev);
|
||||
int err = -ENOMEM;
|
||||
char hwname[16];
|
||||
|
||||
/* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */
|
||||
kingsun->receiving = 0;
|
||||
|
||||
/* Initialize for SIR to copy data directly into skb. */
|
||||
kingsun->rx_buff.in_frame = FALSE;
|
||||
kingsun->rx_buff.state = OUTSIDE_FRAME;
|
||||
kingsun->rx_buff.truesize = IRDA_SKB_MAX_MTU;
|
||||
kingsun->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
|
||||
if (!kingsun->rx_buff.skb)
|
||||
goto free_mem;
|
||||
|
||||
skb_reserve(kingsun->rx_buff.skb, 1);
|
||||
kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
|
||||
do_gettimeofday(&kingsun->rx_time);
|
||||
|
||||
kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
if (!kingsun->rx_urb)
|
||||
goto free_mem;
|
||||
|
||||
kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
if (!kingsun->tx_urb)
|
||||
goto free_mem;
|
||||
|
||||
/*
|
||||
* Now that everything should be initialized properly,
|
||||
* Open new IrLAP layer instance to take care of us...
|
||||
*/
|
||||
sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
|
||||
kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
|
||||
if (!kingsun->irlap) {
|
||||
err("kingsun-sir: irlap_open failed");
|
||||
goto free_mem;
|
||||
}
|
||||
|
||||
/* Start first reception */
|
||||
usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
|
||||
usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
|
||||
kingsun->in_buf, kingsun->max_rx,
|
||||
kingsun_rcv_irq, kingsun, 1);
|
||||
kingsun->rx_urb->status = 0;
|
||||
err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
|
||||
if (err) {
|
||||
err("kingsun-sir: first urb-submit failed: %d", err);
|
||||
goto close_irlap;
|
||||
}
|
||||
|
||||
netif_start_queue(netdev);
|
||||
|
||||
/* Situation at this point:
|
||||
- all work buffers allocated
|
||||
- urbs allocated and ready to fill
|
||||
- max rx packet known (in max_rx)
|
||||
- unwrap state machine initialized, in state outside of any frame
|
||||
- receive request in progress
|
||||
- IrLAP layer started, about to hand over packets to send
|
||||
*/
|
||||
|
||||
return 0;
|
||||
|
||||
close_irlap:
|
||||
irlap_close(kingsun->irlap);
|
||||
free_mem:
|
||||
if (kingsun->tx_urb) {
|
||||
usb_free_urb(kingsun->tx_urb);
|
||||
kingsun->tx_urb = NULL;
|
||||
}
|
||||
if (kingsun->rx_urb) {
|
||||
usb_free_urb(kingsun->rx_urb);
|
||||
kingsun->rx_urb = NULL;
|
||||
}
|
||||
if (kingsun->rx_buff.skb) {
|
||||
kfree_skb(kingsun->rx_buff.skb);
|
||||
kingsun->rx_buff.skb = NULL;
|
||||
kingsun->rx_buff.head = NULL;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Function kingsun_net_close (kingsun)
|
||||
*
|
||||
* Network device is taken down. Usually this is done by
|
||||
* "ifconfig irda0 down"
|
||||
*/
|
||||
static int kingsun_net_close(struct net_device *netdev)
|
||||
{
|
||||
struct kingsun_cb *kingsun = netdev_priv(netdev);
|
||||
|
||||
/* Stop transmit processing */
|
||||
netif_stop_queue(netdev);
|
||||
|
||||
/* Mop up receive && transmit urb's */
|
||||
usb_kill_urb(kingsun->tx_urb);
|
||||
usb_kill_urb(kingsun->rx_urb);
|
||||
|
||||
usb_free_urb(kingsun->tx_urb);
|
||||
usb_free_urb(kingsun->rx_urb);
|
||||
|
||||
kingsun->tx_urb = NULL;
|
||||
kingsun->rx_urb = NULL;
|
||||
|
||||
kfree_skb(kingsun->rx_buff.skb);
|
||||
kingsun->rx_buff.skb = NULL;
|
||||
kingsun->rx_buff.head = NULL;
|
||||
kingsun->rx_buff.in_frame = FALSE;
|
||||
kingsun->rx_buff.state = OUTSIDE_FRAME;
|
||||
kingsun->receiving = 0;
|
||||
|
||||
/* Stop and remove instance of IrLAP */
|
||||
if (kingsun->irlap)
|
||||
irlap_close(kingsun->irlap);
|
||||
|
||||
kingsun->irlap = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* IOCTLs : Extra out-of-band network commands...
|
||||
*/
|
||||
static int kingsun_net_ioctl(struct net_device *netdev, struct ifreq *rq,
|
||||
int cmd)
|
||||
{
|
||||
struct if_irda_req *irq = (struct if_irda_req *) rq;
|
||||
struct kingsun_cb *kingsun = netdev_priv(netdev);
|
||||
int ret = 0;
|
||||
|
||||
switch (cmd) {
|
||||
case SIOCSBANDWIDTH: /* Set bandwidth */
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
/* Check if the device is still there */
|
||||
if (netif_device_present(kingsun->netdev))
|
||||
/* No observed commands for speed change */
|
||||
ret = -EOPNOTSUPP;
|
||||
break;
|
||||
|
||||
case SIOCSMEDIABUSY: /* Set media busy */
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return -EPERM;
|
||||
|
||||
/* Check if the IrDA stack is still there */
|
||||
if (netif_running(kingsun->netdev))
|
||||
irda_device_set_media_busy(kingsun->netdev, TRUE);
|
||||
break;
|
||||
|
||||
case SIOCGRECEIVING:
|
||||
/* Only approximately true */
|
||||
irq->ifr_receiving = kingsun->receiving;
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get device stats (for /proc/net/dev and ifconfig)
|
||||
*/
|
||||
static struct net_device_stats *
|
||||
kingsun_net_get_stats(struct net_device *netdev)
|
||||
{
|
||||
struct kingsun_cb *kingsun = netdev_priv(netdev);
|
||||
return &kingsun->stats;
|
||||
}
|
||||
|
||||
/*
|
||||
* This routine is called by the USB subsystem for each new device
|
||||
* in the system. We need to check if the device is ours, and in
|
||||
* this case start handling it.
|
||||
*/
|
||||
static int kingsun_probe(struct usb_interface *intf,
|
||||
const struct usb_device_id *id)
|
||||
{
|
||||
struct usb_host_interface *interface;
|
||||
struct usb_endpoint_descriptor *endpoint;
|
||||
|
||||
struct usb_device *dev = interface_to_usbdev(intf);
|
||||
struct kingsun_cb *kingsun = NULL;
|
||||
struct net_device *net = NULL;
|
||||
int ret = -ENOMEM;
|
||||
int pipe, maxp_in, maxp_out;
|
||||
__u8 ep_in;
|
||||
__u8 ep_out;
|
||||
|
||||
/* Check that there really are two interrupt endpoints.
|
||||
Check based on the one in drivers/usb/input/usbmouse.c
|
||||
*/
|
||||
interface = intf->cur_altsetting;
|
||||
if (interface->desc.bNumEndpoints != 2) {
|
||||
err("kingsun-sir: expected 2 endpoints, found %d",
|
||||
interface->desc.bNumEndpoints);
|
||||
return -ENODEV;
|
||||
}
|
||||
endpoint = &interface->endpoint[KINGSUN_EP_IN].desc;
|
||||
if (!usb_endpoint_is_int_in(endpoint)) {
|
||||
err("kingsun-sir: endpoint 0 is not interrupt IN");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ep_in = endpoint->bEndpointAddress;
|
||||
pipe = usb_rcvintpipe(dev, ep_in);
|
||||
maxp_in = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
|
||||
if (maxp_in > 255 || maxp_in <= 1) {
|
||||
err("%s: endpoint 0 has max packet size %d not in range",
|
||||
__FILE__, maxp_in);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
endpoint = &interface->endpoint[KINGSUN_EP_OUT].desc;
|
||||
if (!usb_endpoint_is_int_out(endpoint)) {
|
||||
err("kingsun-sir: endpoint 1 is not interrupt OUT");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ep_out = endpoint->bEndpointAddress;
|
||||
pipe = usb_sndintpipe(dev, ep_out);
|
||||
maxp_out = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
|
||||
|
||||
/* Allocate network device container. */
|
||||
net = alloc_irdadev(sizeof(*kingsun));
|
||||
if(!net)
|
||||
goto err_out1;
|
||||
|
||||
SET_MODULE_OWNER(net);
|
||||
SET_NETDEV_DEV(net, &intf->dev);
|
||||
kingsun = netdev_priv(net);
|
||||
kingsun->irlap = NULL;
|
||||
kingsun->tx_urb = NULL;
|
||||
kingsun->rx_urb = NULL;
|
||||
kingsun->ep_in = ep_in;
|
||||
kingsun->ep_out = ep_out;
|
||||
kingsun->in_buf = NULL;
|
||||
kingsun->out_buf = NULL;
|
||||
kingsun->max_rx = (__u8)maxp_in;
|
||||
kingsun->max_tx = (__u8)maxp_out;
|
||||
kingsun->netdev = net;
|
||||
kingsun->usbdev = dev;
|
||||
kingsun->rx_buff.in_frame = FALSE;
|
||||
kingsun->rx_buff.state = OUTSIDE_FRAME;
|
||||
kingsun->rx_buff.skb = NULL;
|
||||
kingsun->receiving = 0;
|
||||
spin_lock_init(&kingsun->lock);
|
||||
|
||||
/* Allocate input buffer */
|
||||
kingsun->in_buf = (__u8 *)kmalloc(kingsun->max_rx, GFP_KERNEL);
|
||||
if (!kingsun->in_buf)
|
||||
goto free_mem;
|
||||
|
||||
/* Allocate output buffer */
|
||||
kingsun->out_buf = (__u8 *)kmalloc(KINGSUN_FIFO_SIZE, GFP_KERNEL);
|
||||
if (!kingsun->out_buf)
|
||||
goto free_mem;
|
||||
|
||||
printk(KERN_INFO "KingSun/DonShine IRDA/USB found at address %d, "
|
||||
"Vendor: %x, Product: %x\n",
|
||||
dev->devnum, le16_to_cpu(dev->descriptor.idVendor),
|
||||
le16_to_cpu(dev->descriptor.idProduct));
|
||||
|
||||
/* Initialize QoS for this device */
|
||||
irda_init_max_qos_capabilies(&kingsun->qos);
|
||||
|
||||
/* That's the Rx capability. */
|
||||
kingsun->qos.baud_rate.bits &= IR_9600;
|
||||
kingsun->qos.min_turn_time.bits &= KINGSUN_MTT;
|
||||
irda_qos_bits_to_value(&kingsun->qos);
|
||||
|
||||
/* Override the network functions we need to use */
|
||||
net->hard_start_xmit = kingsun_hard_xmit;
|
||||
net->open = kingsun_net_open;
|
||||
net->stop = kingsun_net_close;
|
||||
net->get_stats = kingsun_net_get_stats;
|
||||
net->do_ioctl = kingsun_net_ioctl;
|
||||
|
||||
ret = register_netdev(net);
|
||||
if (ret != 0)
|
||||
goto free_mem;
|
||||
|
||||
info("IrDA: Registered KingSun/DonShine device %s", net->name);
|
||||
|
||||
usb_set_intfdata(intf, kingsun);
|
||||
|
||||
/* Situation at this point:
|
||||
- all work buffers allocated
|
||||
- urbs not allocated, set to NULL
|
||||
- max rx packet known (in max_rx)
|
||||
- unwrap state machine (partially) initialized, but skb == NULL
|
||||
*/
|
||||
|
||||
return 0;
|
||||
|
||||
free_mem:
|
||||
if (kingsun->out_buf) kfree(kingsun->out_buf);
|
||||
if (kingsun->in_buf) kfree(kingsun->in_buf);
|
||||
free_netdev(net);
|
||||
err_out1:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* The current device is removed, the USB layer tells us to shut it down...
|
||||
*/
|
||||
static void kingsun_disconnect(struct usb_interface *intf)
|
||||
{
|
||||
struct kingsun_cb *kingsun = usb_get_intfdata(intf);
|
||||
|
||||
if (!kingsun)
|
||||
return;
|
||||
|
||||
unregister_netdev(kingsun->netdev);
|
||||
|
||||
/* Mop up receive && transmit urb's */
|
||||
if (kingsun->tx_urb != NULL) {
|
||||
usb_kill_urb(kingsun->tx_urb);
|
||||
usb_free_urb(kingsun->tx_urb);
|
||||
kingsun->tx_urb = NULL;
|
||||
}
|
||||
if (kingsun->rx_urb != NULL) {
|
||||
usb_kill_urb(kingsun->rx_urb);
|
||||
usb_free_urb(kingsun->rx_urb);
|
||||
kingsun->rx_urb = NULL;
|
||||
}
|
||||
|
||||
kfree(kingsun->out_buf);
|
||||
kfree(kingsun->in_buf);
|
||||
free_netdev(kingsun->netdev);
|
||||
|
||||
usb_set_intfdata(intf, NULL);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/* USB suspend, so power off the transmitter/receiver */
|
||||
static int kingsun_suspend(struct usb_interface *intf, pm_message_t message)
|
||||
{
|
||||
struct kingsun_cb *kingsun = usb_get_intfdata(intf);
|
||||
|
||||
netif_device_detach(kingsun->netdev);
|
||||
if (kingsun->tx_urb != NULL) usb_kill_urb(kingsun->tx_urb);
|
||||
if (kingsun->rx_urb != NULL) usb_kill_urb(kingsun->rx_urb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Coming out of suspend, so reset hardware */
|
||||
static int kingsun_resume(struct usb_interface *intf)
|
||||
{
|
||||
struct kingsun_cb *kingsun = usb_get_intfdata(intf);
|
||||
|
||||
if (kingsun->rx_urb != NULL)
|
||||
usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
|
||||
netif_device_attach(kingsun->netdev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* USB device callbacks
|
||||
*/
|
||||
static struct usb_driver irda_driver = {
|
||||
.name = "kingsun-sir",
|
||||
.probe = kingsun_probe,
|
||||
.disconnect = kingsun_disconnect,
|
||||
.id_table = dongles,
|
||||
#ifdef CONFIG_PM
|
||||
.suspend = kingsun_suspend,
|
||||
.resume = kingsun_resume,
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
* Module insertion
|
||||
*/
|
||||
static int __init kingsun_init(void)
|
||||
{
|
||||
return usb_register(&irda_driver);
|
||||
}
|
||||
module_init(kingsun_init);
|
||||
|
||||
/*
|
||||
* Module removal
|
||||
*/
|
||||
static void __exit kingsun_cleanup(void)
|
||||
{
|
||||
/* Deregister the driver and remove all pending instances */
|
||||
usb_deregister(&irda_driver);
|
||||
}
|
||||
module_exit(kingsun_cleanup);
|
||||
|
||||
MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
|
||||
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
|
||||
MODULE_LICENSE("GPL");
|
@ -467,6 +467,8 @@ struct net_device
|
||||
/* device index hash chain */
|
||||
struct hlist_node index_hlist;
|
||||
|
||||
struct net_device *link_watch_next;
|
||||
|
||||
/* register/unregister state machine */
|
||||
enum { NETREG_UNINITIALIZED=0,
|
||||
NETREG_REGISTERED, /* completed register_netdevice */
|
||||
|
@ -54,6 +54,14 @@ struct xt_entry_target
|
||||
unsigned char data[0];
|
||||
};
|
||||
|
||||
#define XT_TARGET_INIT(__name, __size) \
|
||||
{ \
|
||||
.target.u.user = { \
|
||||
.target_size = XT_ALIGN(__size), \
|
||||
.name = __name, \
|
||||
}, \
|
||||
}
|
||||
|
||||
struct xt_standard_target
|
||||
{
|
||||
struct xt_entry_target target;
|
||||
|
@ -238,6 +238,47 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e
|
||||
*/
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/* Standard entry. */
|
||||
struct arpt_standard
|
||||
{
|
||||
struct arpt_entry entry;
|
||||
struct arpt_standard_target target;
|
||||
};
|
||||
|
||||
struct arpt_error_target
|
||||
{
|
||||
struct arpt_entry_target target;
|
||||
char errorname[ARPT_FUNCTION_MAXNAMELEN];
|
||||
};
|
||||
|
||||
struct arpt_error
|
||||
{
|
||||
struct arpt_entry entry;
|
||||
struct arpt_error_target target;
|
||||
};
|
||||
|
||||
#define ARPT_ENTRY_INIT(__size) \
|
||||
{ \
|
||||
.target_offset = sizeof(struct arpt_entry), \
|
||||
.next_offset = (__size), \
|
||||
}
|
||||
|
||||
#define ARPT_STANDARD_INIT(__verdict) \
|
||||
{ \
|
||||
.entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \
|
||||
.target = XT_TARGET_INIT(ARPT_STANDARD_TARGET, \
|
||||
sizeof(struct arpt_standard_target)), \
|
||||
.target.verdict = -(__verdict) - 1, \
|
||||
}
|
||||
|
||||
#define ARPT_ERROR_INIT \
|
||||
{ \
|
||||
.entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \
|
||||
.target = XT_TARGET_INIT(ARPT_ERROR_TARGET, \
|
||||
sizeof(struct arpt_error_target)), \
|
||||
.target.errorname = "ERROR", \
|
||||
}
|
||||
|
||||
#define arpt_register_target(tgt) \
|
||||
({ (tgt)->family = NF_ARP; \
|
||||
xt_register_target(tgt); })
|
||||
|
@ -295,6 +295,28 @@ struct ipt_error
|
||||
struct ipt_error_target target;
|
||||
};
|
||||
|
||||
#define IPT_ENTRY_INIT(__size) \
|
||||
{ \
|
||||
.target_offset = sizeof(struct ipt_entry), \
|
||||
.next_offset = (__size), \
|
||||
}
|
||||
|
||||
#define IPT_STANDARD_INIT(__verdict) \
|
||||
{ \
|
||||
.entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \
|
||||
.target = XT_TARGET_INIT(IPT_STANDARD_TARGET, \
|
||||
sizeof(struct xt_standard_target)), \
|
||||
.target.verdict = -(__verdict) - 1, \
|
||||
}
|
||||
|
||||
#define IPT_ERROR_INIT \
|
||||
{ \
|
||||
.entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \
|
||||
.target = XT_TARGET_INIT(IPT_ERROR_TARGET, \
|
||||
sizeof(struct ipt_error_target)), \
|
||||
.target.errorname = "ERROR", \
|
||||
}
|
||||
|
||||
extern unsigned int ipt_do_table(struct sk_buff **pskb,
|
||||
unsigned int hook,
|
||||
const struct net_device *in,
|
||||
|
@ -123,6 +123,28 @@ struct ip6t_error
|
||||
struct ip6t_error_target target;
|
||||
};
|
||||
|
||||
#define IP6T_ENTRY_INIT(__size) \
|
||||
{ \
|
||||
.target_offset = sizeof(struct ip6t_entry), \
|
||||
.next_offset = (__size), \
|
||||
}
|
||||
|
||||
#define IP6T_STANDARD_INIT(__verdict) \
|
||||
{ \
|
||||
.entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \
|
||||
.target = XT_TARGET_INIT(IP6T_STANDARD_TARGET, \
|
||||
sizeof(struct ip6t_standard_target)), \
|
||||
.target.verdict = -(__verdict) - 1, \
|
||||
}
|
||||
|
||||
#define IP6T_ERROR_INIT \
|
||||
{ \
|
||||
.entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \
|
||||
.target = XT_TARGET_INIT(IP6T_ERROR_TARGET, \
|
||||
sizeof(struct ip6t_error_target)), \
|
||||
.target.errorname = "ERROR", \
|
||||
}
|
||||
|
||||
/*
|
||||
* New IP firewall options for [gs]etsockopt at the RAW IP level.
|
||||
* Unlike BSD Linux inherits IP options so you don't have to use
|
||||
|
@ -183,13 +183,6 @@ extern void nf_conntrack_hash_insert(struct nf_conn *ct);
|
||||
|
||||
extern void nf_conntrack_flush(void);
|
||||
|
||||
extern struct nf_conntrack_helper *
|
||||
nf_ct_helper_find_get( const struct nf_conntrack_tuple *tuple);
|
||||
extern void nf_ct_helper_put(struct nf_conntrack_helper *helper);
|
||||
|
||||
extern struct nf_conntrack_helper *
|
||||
__nf_conntrack_helper_find_byname(const char *name);
|
||||
|
||||
extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
|
||||
const struct nf_conntrack_tuple *orig);
|
||||
|
||||
|
@ -56,9 +56,6 @@ struct nf_conntrack_l3proto
|
||||
*/
|
||||
int (*new)(struct nf_conn *conntrack, const struct sk_buff *skb);
|
||||
|
||||
/* Called when a conntrack entry is destroyed */
|
||||
void (*destroy)(struct nf_conn *conntrack);
|
||||
|
||||
/*
|
||||
* Called before tracking.
|
||||
* *dataoff: offset of protocol header (TCP, UDP,...) in *pskb
|
||||
|
@ -10,16 +10,11 @@ extern int nf_nat_rule_find(struct sk_buff **pskb,
|
||||
unsigned int hooknum,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
struct nf_conn *ct,
|
||||
struct nf_nat_info *info);
|
||||
struct nf_conn *ct);
|
||||
|
||||
extern unsigned int
|
||||
alloc_null_binding(struct nf_conn *ct,
|
||||
struct nf_nat_info *info,
|
||||
unsigned int hooknum);
|
||||
alloc_null_binding(struct nf_conn *ct, unsigned int hooknum);
|
||||
|
||||
extern unsigned int
|
||||
alloc_null_binding_confirmed(struct nf_conn *ct,
|
||||
struct nf_nat_info *info,
|
||||
unsigned int hooknum);
|
||||
alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum);
|
||||
#endif /* _NF_NAT_RULE_H */
|
||||
|
@ -119,9 +119,16 @@ static inline void udp_lib_close(struct sock *sk, long timeout)
|
||||
}
|
||||
|
||||
|
||||
struct udp_get_port_ops {
|
||||
int (*saddr_cmp)(const struct sock *sk1, const struct sock *sk2);
|
||||
int (*saddr_any)(const struct sock *sk);
|
||||
unsigned int (*hash_port_and_rcv_saddr)(__u16 port,
|
||||
const struct sock *sk);
|
||||
};
|
||||
|
||||
/* net/ipv4/udp.c */
|
||||
extern int udp_get_port(struct sock *sk, unsigned short snum,
|
||||
int (*saddr_cmp)(const struct sock *, const struct sock *));
|
||||
const struct udp_get_port_ops *ops);
|
||||
extern void udp_err(struct sk_buff *, u32);
|
||||
|
||||
extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
|
||||
|
@ -120,5 +120,5 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
|
||||
|
||||
extern void udplite4_register(void);
|
||||
extern int udplite_get_port(struct sock *sk, unsigned short snum,
|
||||
int (*scmp)(const struct sock *, const struct sock *));
|
||||
const struct udp_get_port_ops *ops);
|
||||
#endif /* _UDPLITE_H */
|
||||
|
@ -174,7 +174,7 @@ static inline int hidp_queue_event(struct hidp_session *session, struct input_de
|
||||
|
||||
static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
|
||||
{
|
||||
struct hid_device *hid = dev->private;
|
||||
struct hid_device *hid = input_get_drvdata(dev);
|
||||
struct hidp_session *session = hid->driver_data;
|
||||
|
||||
return hidp_queue_event(session, dev, type, code, value);
|
||||
@ -182,7 +182,7 @@ static int hidp_hidinput_event(struct input_dev *dev, unsigned int type, unsigne
|
||||
|
||||
static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
|
||||
{
|
||||
struct hidp_session *session = dev->private;
|
||||
struct hidp_session *session = input_get_drvdata(dev);
|
||||
|
||||
return hidp_queue_event(session, dev, type, code, value);
|
||||
}
|
||||
@ -630,7 +630,7 @@ static inline void hidp_setup_input(struct hidp_session *session, struct hidp_co
|
||||
struct input_dev *input = session->input;
|
||||
int i;
|
||||
|
||||
input->private = session;
|
||||
input_set_drvdata(input, session);
|
||||
|
||||
input->name = "Bluetooth HID Boot Protocol Device";
|
||||
|
||||
@ -663,7 +663,7 @@ static inline void hidp_setup_input(struct hidp_session *session, struct hidp_co
|
||||
input->relbit[0] |= BIT(REL_WHEEL);
|
||||
}
|
||||
|
||||
input->cdev.dev = hidp_get_device(session);
|
||||
input->dev.parent = hidp_get_device(session);
|
||||
|
||||
input->event = hidp_input_event;
|
||||
|
||||
@ -864,7 +864,7 @@ failed:
|
||||
if (session->hid)
|
||||
hid_free_device(session->hid);
|
||||
|
||||
kfree(session->input);
|
||||
input_free_device(session->input);
|
||||
kfree(session);
|
||||
return err;
|
||||
}
|
||||
|
@ -19,7 +19,6 @@
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/bitops.h>
|
||||
@ -27,8 +26,7 @@
|
||||
|
||||
|
||||
enum lw_bits {
|
||||
LW_RUNNING = 0,
|
||||
LW_SE_USED
|
||||
LW_URGENT = 0,
|
||||
};
|
||||
|
||||
static unsigned long linkwatch_flags;
|
||||
@ -37,17 +35,9 @@ static unsigned long linkwatch_nextevent;
|
||||
static void linkwatch_event(struct work_struct *dummy);
|
||||
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
|
||||
|
||||
static LIST_HEAD(lweventlist);
|
||||
static struct net_device *lweventlist;
|
||||
static DEFINE_SPINLOCK(lweventlist_lock);
|
||||
|
||||
struct lw_event {
|
||||
struct list_head list;
|
||||
struct net_device *dev;
|
||||
};
|
||||
|
||||
/* Avoid kmalloc() for most systems */
|
||||
static struct lw_event singleevent;
|
||||
|
||||
static unsigned char default_operstate(const struct net_device *dev)
|
||||
{
|
||||
if (!netif_carrier_ok(dev))
|
||||
@ -87,25 +77,102 @@ static void rfc2863_policy(struct net_device *dev)
|
||||
}
|
||||
|
||||
|
||||
/* Must be called with the rtnl semaphore held */
|
||||
void linkwatch_run_queue(void)
|
||||
static int linkwatch_urgent_event(struct net_device *dev)
|
||||
{
|
||||
struct list_head head, *n, *next;
|
||||
return netif_running(dev) && netif_carrier_ok(dev) &&
|
||||
dev->qdisc != dev->qdisc_sleeping;
|
||||
}
|
||||
|
||||
|
||||
static void linkwatch_add_event(struct net_device *dev)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&lweventlist_lock, flags);
|
||||
dev->link_watch_next = lweventlist;
|
||||
lweventlist = dev;
|
||||
spin_unlock_irqrestore(&lweventlist_lock, flags);
|
||||
}
|
||||
|
||||
|
||||
static void linkwatch_schedule_work(int urgent)
|
||||
{
|
||||
unsigned long delay = linkwatch_nextevent - jiffies;
|
||||
|
||||
if (test_bit(LW_URGENT, &linkwatch_flags))
|
||||
return;
|
||||
|
||||
/* Minimise down-time: drop delay for up event. */
|
||||
if (urgent) {
|
||||
if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
|
||||
return;
|
||||
delay = 0;
|
||||
}
|
||||
|
||||
/* If we wrap around we'll delay it by at most HZ. */
|
||||
if (delay > HZ)
|
||||
delay = 0;
|
||||
|
||||
/*
|
||||
* This is true if we've scheduled it immediately or if we don't
|
||||
* need an immediate execution and it's already pending.
|
||||
*/
|
||||
if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
|
||||
return;
|
||||
|
||||
/* Don't bother if there is nothing urgent. */
|
||||
if (!test_bit(LW_URGENT, &linkwatch_flags))
|
||||
return;
|
||||
|
||||
/* It's already running which is good enough. */
|
||||
if (!cancel_delayed_work(&linkwatch_work))
|
||||
return;
|
||||
|
||||
/* Otherwise we reschedule it again for immediate execution. */
|
||||
schedule_delayed_work(&linkwatch_work, 0);
|
||||
}
|
||||
|
||||
|
||||
static void __linkwatch_run_queue(int urgent_only)
|
||||
{
|
||||
struct net_device *next;
|
||||
|
||||
/*
|
||||
* Limit the number of linkwatch events to one
|
||||
* per second so that a runaway driver does not
|
||||
* cause a storm of messages on the netlink
|
||||
* socket. This limit does not apply to up events
|
||||
* while the device qdisc is down.
|
||||
*/
|
||||
if (!urgent_only)
|
||||
linkwatch_nextevent = jiffies + HZ;
|
||||
/* Limit wrap-around effect on delay. */
|
||||
else if (time_after(linkwatch_nextevent, jiffies + HZ))
|
||||
linkwatch_nextevent = jiffies;
|
||||
|
||||
clear_bit(LW_URGENT, &linkwatch_flags);
|
||||
|
||||
spin_lock_irq(&lweventlist_lock);
|
||||
list_replace_init(&lweventlist, &head);
|
||||
next = lweventlist;
|
||||
lweventlist = NULL;
|
||||
spin_unlock_irq(&lweventlist_lock);
|
||||
|
||||
list_for_each_safe(n, next, &head) {
|
||||
struct lw_event *event = list_entry(n, struct lw_event, list);
|
||||
struct net_device *dev = event->dev;
|
||||
while (next) {
|
||||
struct net_device *dev = next;
|
||||
|
||||
if (event == &singleevent) {
|
||||
clear_bit(LW_SE_USED, &linkwatch_flags);
|
||||
} else {
|
||||
kfree(event);
|
||||
next = dev->link_watch_next;
|
||||
|
||||
if (urgent_only && !linkwatch_urgent_event(dev)) {
|
||||
linkwatch_add_event(dev);
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Make sure the above read is complete since it can be
|
||||
* rewritten as soon as we clear the bit below.
|
||||
*/
|
||||
smp_mb__before_clear_bit();
|
||||
|
||||
/* We are about to handle this device,
|
||||
* so new events can be accepted
|
||||
*/
|
||||
@ -124,58 +191,39 @@ void linkwatch_run_queue(void)
|
||||
|
||||
dev_put(dev);
|
||||
}
|
||||
|
||||
if (lweventlist)
|
||||
linkwatch_schedule_work(0);
|
||||
}
|
||||
|
||||
|
||||
/* Must be called with the rtnl semaphore held */
|
||||
void linkwatch_run_queue(void)
|
||||
{
|
||||
__linkwatch_run_queue(0);
|
||||
}
|
||||
|
||||
|
||||
static void linkwatch_event(struct work_struct *dummy)
|
||||
{
|
||||
/* Limit the number of linkwatch events to one
|
||||
* per second so that a runaway driver does not
|
||||
* cause a storm of messages on the netlink
|
||||
* socket
|
||||
*/
|
||||
linkwatch_nextevent = jiffies + HZ;
|
||||
clear_bit(LW_RUNNING, &linkwatch_flags);
|
||||
|
||||
rtnl_lock();
|
||||
linkwatch_run_queue();
|
||||
__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
|
||||
void linkwatch_fire_event(struct net_device *dev)
|
||||
{
|
||||
int urgent = linkwatch_urgent_event(dev);
|
||||
|
||||
if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
|
||||
unsigned long flags;
|
||||
struct lw_event *event;
|
||||
|
||||
if (test_and_set_bit(LW_SE_USED, &linkwatch_flags)) {
|
||||
event = kmalloc(sizeof(struct lw_event), GFP_ATOMIC);
|
||||
|
||||
if (unlikely(event == NULL)) {
|
||||
clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
event = &singleevent;
|
||||
}
|
||||
|
||||
dev_hold(dev);
|
||||
event->dev = dev;
|
||||
|
||||
spin_lock_irqsave(&lweventlist_lock, flags);
|
||||
list_add_tail(&event->list, &lweventlist);
|
||||
spin_unlock_irqrestore(&lweventlist_lock, flags);
|
||||
linkwatch_add_event(dev);
|
||||
} else if (!urgent)
|
||||
return;
|
||||
|
||||
if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
|
||||
unsigned long delay = linkwatch_nextevent - jiffies;
|
||||
|
||||
/* If we wrap around we'll delay it by at most HZ. */
|
||||
if (delay > HZ)
|
||||
delay = 0;
|
||||
schedule_delayed_work(&linkwatch_work, delay);
|
||||
}
|
||||
}
|
||||
linkwatch_schedule_work(urgent);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(linkwatch_fire_event);
|
||||
|
@ -15,128 +15,34 @@ MODULE_DESCRIPTION("arptables filter table");
|
||||
#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
|
||||
(1 << NF_ARP_FORWARD))
|
||||
|
||||
/* Standard entry. */
|
||||
struct arpt_standard
|
||||
{
|
||||
struct arpt_entry entry;
|
||||
struct arpt_standard_target target;
|
||||
};
|
||||
|
||||
struct arpt_error_target
|
||||
{
|
||||
struct arpt_entry_target target;
|
||||
char errorname[ARPT_FUNCTION_MAXNAMELEN];
|
||||
};
|
||||
|
||||
struct arpt_error
|
||||
{
|
||||
struct arpt_entry entry;
|
||||
struct arpt_error_target target;
|
||||
};
|
||||
|
||||
static struct
|
||||
{
|
||||
struct arpt_replace repl;
|
||||
struct arpt_standard entries[3];
|
||||
struct arpt_error term;
|
||||
} initial_table __initdata
|
||||
= { { "filter", FILTER_VALID_HOOKS, 4,
|
||||
sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
|
||||
{ [NF_ARP_IN] = 0,
|
||||
[NF_ARP_OUT] = sizeof(struct arpt_standard),
|
||||
[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), },
|
||||
{ [NF_ARP_IN] = 0,
|
||||
[NF_ARP_OUT] = sizeof(struct arpt_standard),
|
||||
[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard), },
|
||||
0, NULL, { } },
|
||||
{
|
||||
/* ARP_IN */
|
||||
{
|
||||
{
|
||||
{
|
||||
{ 0 }, { 0 }, { 0 }, { 0 },
|
||||
0, 0,
|
||||
{ { 0, }, { 0, } },
|
||||
{ { 0, }, { 0, } },
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0,
|
||||
"", "", { 0 }, { 0 },
|
||||
0, 0
|
||||
},
|
||||
sizeof(struct arpt_entry),
|
||||
sizeof(struct arpt_standard),
|
||||
0,
|
||||
{ 0, 0 }, { } },
|
||||
{ { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 }
|
||||
},
|
||||
/* ARP_OUT */
|
||||
{
|
||||
{
|
||||
{
|
||||
{ 0 }, { 0 }, { 0 }, { 0 },
|
||||
0, 0,
|
||||
{ { 0, }, { 0, } },
|
||||
{ { 0, }, { 0, } },
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0,
|
||||
"", "", { 0 }, { 0 },
|
||||
0, 0
|
||||
},
|
||||
sizeof(struct arpt_entry),
|
||||
sizeof(struct arpt_standard),
|
||||
0,
|
||||
{ 0, 0 }, { } },
|
||||
{ { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 }
|
||||
},
|
||||
/* ARP_FORWARD */
|
||||
{
|
||||
{
|
||||
{
|
||||
{ 0 }, { 0 }, { 0 }, { 0 },
|
||||
0, 0,
|
||||
{ { 0, }, { 0, } },
|
||||
{ { 0, }, { 0, } },
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0,
|
||||
"", "", { 0 }, { 0 },
|
||||
0, 0
|
||||
},
|
||||
sizeof(struct arpt_entry),
|
||||
sizeof(struct arpt_standard),
|
||||
0,
|
||||
{ 0, 0 }, { } },
|
||||
{ { { { ARPT_ALIGN(sizeof(struct arpt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 }
|
||||
}
|
||||
},
|
||||
/* ERROR */
|
||||
{
|
||||
{
|
||||
{
|
||||
{ 0 }, { 0 }, { 0 }, { 0 },
|
||||
0, 0,
|
||||
{ { 0, }, { 0, } },
|
||||
{ { 0, }, { 0, } },
|
||||
0, 0,
|
||||
0, 0,
|
||||
0, 0,
|
||||
"", "", { 0 }, { 0 },
|
||||
0, 0
|
||||
},
|
||||
sizeof(struct arpt_entry),
|
||||
sizeof(struct arpt_error),
|
||||
0,
|
||||
{ 0, 0 }, { } },
|
||||
{ { { { ARPT_ALIGN(sizeof(struct arpt_error_target)), ARPT_ERROR_TARGET } },
|
||||
{ } },
|
||||
"ERROR"
|
||||
}
|
||||
}
|
||||
} initial_table __initdata = {
|
||||
.repl = {
|
||||
.name = "filter",
|
||||
.valid_hooks = FILTER_VALID_HOOKS,
|
||||
.num_entries = 4,
|
||||
.size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
|
||||
.hook_entry = {
|
||||
[NF_ARP_IN] = 0,
|
||||
[NF_ARP_OUT] = sizeof(struct arpt_standard),
|
||||
[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
|
||||
},
|
||||
.underflow = {
|
||||
[NF_ARP_IN] = 0,
|
||||
[NF_ARP_OUT] = sizeof(struct arpt_standard),
|
||||
[NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
|
||||
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
|
||||
ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
|
||||
},
|
||||
.term = ARPT_ERROR_INIT,
|
||||
};
|
||||
|
||||
static struct arpt_table packet_filter = {
|
||||
|
@ -26,53 +26,29 @@ static struct
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[3];
|
||||
struct ipt_error term;
|
||||
} initial_table __initdata
|
||||
= { { "filter", FILTER_VALID_HOOKS, 4,
|
||||
sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
|
||||
{ [NF_IP_LOCAL_IN] = 0,
|
||||
[NF_IP_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 },
|
||||
{ [NF_IP_LOCAL_IN] = 0,
|
||||
[NF_IP_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 },
|
||||
0, NULL, { } },
|
||||
{
|
||||
/* LOCAL_IN */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
/* FORWARD */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
/* LOCAL_OUT */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } }
|
||||
},
|
||||
/* ERROR */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_error),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
|
||||
{ } },
|
||||
"ERROR"
|
||||
}
|
||||
}
|
||||
} initial_table __initdata = {
|
||||
.repl = {
|
||||
.name = "filter",
|
||||
.valid_hooks = FILTER_VALID_HOOKS,
|
||||
.num_entries = 4,
|
||||
.size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_IP_LOCAL_IN] = 0,
|
||||
[NF_IP_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
.underflow = {
|
||||
[NF_IP_LOCAL_IN] = 0,
|
||||
[NF_IP_FORWARD] = sizeof(struct ipt_standard),
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static struct xt_table packet_filter = {
|
||||
@ -105,7 +81,8 @@ ipt_local_out_hook(unsigned int hook,
|
||||
if ((*pskb)->len < sizeof(struct iphdr)
|
||||
|| ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
|
||||
if (net_ratelimit())
|
||||
printk("ipt_hook: happy cracking.\n");
|
||||
printk("iptable_filter: ignoring short SOCK_RAW "
|
||||
"packet.\n");
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
|
||||
|
@ -33,73 +33,35 @@ static struct
|
||||
struct ipt_replace repl;
|
||||
struct ipt_standard entries[5];
|
||||
struct ipt_error term;
|
||||
} initial_table __initdata
|
||||
= { { "mangle", MANGLE_VALID_HOOKS, 6,
|
||||
sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
|
||||
{ [NF_IP_PRE_ROUTING] = 0,
|
||||
[NF_IP_LOCAL_IN] = sizeof(struct ipt_standard),
|
||||
[NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2,
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
|
||||
[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4 },
|
||||
{ [NF_IP_PRE_ROUTING] = 0,
|
||||
[NF_IP_LOCAL_IN] = sizeof(struct ipt_standard),
|
||||
[NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2,
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
|
||||
[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4 },
|
||||
0, NULL, { } },
|
||||
{
|
||||
/* PRE_ROUTING */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
/* LOCAL_IN */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
/* FORWARD */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
/* LOCAL_OUT */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
/* POST_ROUTING */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_standard),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
|
||||
-NF_ACCEPT - 1 } },
|
||||
},
|
||||
/* ERROR */
|
||||
{ { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
|
||||
0,
|
||||
sizeof(struct ipt_entry),
|
||||
sizeof(struct ipt_error),
|
||||
0, { 0, 0 }, { } },
|
||||
{ { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
|
||||
{ } },
|
||||
"ERROR"
|
||||
}
|
||||
}
|
||||
} initial_table __initdata = {
|
||||
.repl = {
|
||||
.name = "mangle",
|
||||
.valid_hooks = MANGLE_VALID_HOOKS,
|
||||
.num_entries = 6,
|
||||
.size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_IP_PRE_ROUTING] = 0,
|
||||
[NF_IP_LOCAL_IN] = sizeof(struct ipt_standard),
|
||||
[NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2,
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
|
||||
[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
|
||||
},
|
||||
.underflow = {
|
||||
[NF_IP_PRE_ROUTING] = 0,
|
||||
[NF_IP_LOCAL_IN] = sizeof(struct ipt_standard),
|
||||
[NF_IP_FORWARD] = sizeof(struct ipt_standard) * 2,
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
|
||||
[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
|
||||
},
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static struct xt_table packet_mangler = {
|
||||
@ -138,7 +100,8 @@ ipt_local_hook(unsigned int hook,
|
||||
if ((*pskb)->len < sizeof(struct iphdr)
|
||||
|| ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
|
||||
if (net_ratelimit())
|
||||
printk("ipt_hook: happy cracking.\n");
|
||||
printk("iptable_mangle: ignoring short SOCK_RAW "
|
||||
"packet.\n");
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
|
||||
|
@ -5,6 +5,7 @@
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/netfilter_ipv4/ip_tables.h>
|
||||
#include <net/ip.h>
|
||||
|
||||
#define RAW_VALID_HOOKS ((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT))
|
||||
|
||||
@ -21,62 +22,18 @@ static struct
|
||||
.size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
|
||||
.hook_entry = {
|
||||
[NF_IP_PRE_ROUTING] = 0,
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) },
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard)
|
||||
},
|
||||
.underflow = {
|
||||
[NF_IP_PRE_ROUTING] = 0,
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) },
|
||||
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard)
|
||||
},
|
||||
},
|
||||
.entries = {
|
||||
/* PRE_ROUTING */
|
||||
{
|
||||
.entry = {
|
||||
.target_offset = sizeof(struct ipt_entry),
|
||||
.next_offset = sizeof(struct ipt_standard),
|
||||
},
|
||||
.target = {
|
||||
.target = {
|
||||
.u = {
|
||||
.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
|
||||
},
|
||||
},
|
||||
.verdict = -NF_ACCEPT - 1,
|
||||
},
|
||||
},
|
||||
|
||||
/* LOCAL_OUT */
|
||||
{
|
||||
.entry = {
|
||||
.target_offset = sizeof(struct ipt_entry),
|
||||
.next_offset = sizeof(struct ipt_standard),
|
||||
},
|
||||
.target = {
|
||||
.target = {
|
||||
.u = {
|
||||
.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
|
||||
},
|
||||
},
|
||||
.verdict = -NF_ACCEPT - 1,
|
||||
},
|
||||
},
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
|
||||
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
|
||||
},
|
||||
/* ERROR */
|
||||
.term = {
|
||||
.entry = {
|
||||
.target_offset = sizeof(struct ipt_entry),
|
||||
.next_offset = sizeof(struct ipt_error),
|
||||
},
|
||||
.target = {
|
||||
.target = {
|
||||
.u = {
|
||||
.user = {
|
||||
.target_size = IPT_ALIGN(sizeof(struct ipt_error_target)),
|
||||
.name = IPT_ERROR_TARGET,
|
||||
},
|
||||
},
|
||||
},
|
||||
.errorname = "ERROR",
|
||||
},
|
||||
}
|
||||
.term = IPT_ERROR_INIT, /* ERROR */
|
||||
};
|
||||
|
||||
static struct xt_table packet_raw = {
|
||||
@ -98,6 +55,24 @@ ipt_hook(unsigned int hook,
|
||||
return ipt_do_table(pskb, hook, in, out, &packet_raw);
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
ipt_local_hook(unsigned int hook,
|
||||
struct sk_buff **pskb,
|
||||
const struct net_device *in,
|
||||
const struct net_device *out,
|
||||
int (*okfn)(struct sk_buff *))
|
||||
{
|
||||
/* root is playing with raw sockets. */
|
||||
if ((*pskb)->len < sizeof(struct iphdr) ||
|
||||
ip_hdrlen(*pskb) < sizeof(struct iphdr)) {
|
||||
if (net_ratelimit())
|
||||
printk("iptable_raw: ignoring short SOCK_RAW"
|
||||
"packet.\n");
|
||||
return NF_ACCEPT;
|
||||
}
|
||||
return ipt_do_table(pskb, hook, in, out, &packet_raw);
|
||||
}
|
||||
|
||||
/* 'raw' is the very first table. */
|
||||
static struct nf_hook_ops ipt_ops[] = {
|
||||
{
|
||||
@ -108,7 +83,7 @@ static struct nf_hook_ops ipt_ops[] = {
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
{
|
||||
.hook = ipt_hook,
|
||||
.hook = ipt_local_hook,
|
||||
.pf = PF_INET,
|
||||
.hooknum = NF_IP_LOCAL_OUT,
|
||||
.priority = NF_IP_PRI_RAW,
|
||||
|
@ -46,77 +46,20 @@ static struct
.hook_entry = {
[NF_IP_PRE_ROUTING] = 0,
[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard),
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 },
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
},
.underflow = {
[NF_IP_PRE_ROUTING] = 0,
[NF_IP_POST_ROUTING] = sizeof(struct ipt_standard),
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2 },
[NF_IP_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
},
},
.entries = {
/* PRE_ROUTING */
{
.entry = {
.target_offset = sizeof(struct ipt_entry),
.next_offset = sizeof(struct ipt_standard),
},
.target = {
.target = {
.u = {
.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
},
},
.verdict = -NF_ACCEPT - 1,
},
},
/* POST_ROUTING */
{
.entry = {
.target_offset = sizeof(struct ipt_entry),
.next_offset = sizeof(struct ipt_standard),
},
.target = {
.target = {
.u = {
.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
},
},
.verdict = -NF_ACCEPT - 1,
},
},
/* LOCAL_OUT */
{
.entry = {
.target_offset = sizeof(struct ipt_entry),
.next_offset = sizeof(struct ipt_standard),
},
.target = {
.target = {
.u = {
.target_size = IPT_ALIGN(sizeof(struct ipt_standard_target)),
},
},
.verdict = -NF_ACCEPT - 1,
},
},
IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
},
/* ERROR */
.term = {
.entry = {
.target_offset = sizeof(struct ipt_entry),
.next_offset = sizeof(struct ipt_error),
},
.target = {
.target = {
.u = {
.user = {
.target_size = IPT_ALIGN(sizeof(struct ipt_error_target)),
.name = IPT_ERROR_TARGET,
},
},
},
.errorname = "ERROR",
},
}
.term = IPT_ERROR_INIT, /* ERROR */
};

static struct xt_table nat_table = {
@ -230,9 +173,7 @@ static int ipt_dnat_checkentry(const char *tablename,
}

inline unsigned int
alloc_null_binding(struct nf_conn *ct,
struct nf_nat_info *info,
unsigned int hooknum)
alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
/* Force range to this IP; let proto decide mapping for
per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
@ -251,9 +192,7 @@ alloc_null_binding(struct nf_conn *ct,
}

unsigned int
alloc_null_binding_confirmed(struct nf_conn *ct,
struct nf_nat_info *info,
unsigned int hooknum)
alloc_null_binding_confirmed(struct nf_conn *ct, unsigned int hooknum)
{
__be32 ip
= (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
@ -275,8 +214,7 @@ int nf_nat_rule_find(struct sk_buff **pskb,
unsigned int hooknum,
const struct net_device *in,
const struct net_device *out,
struct nf_conn *ct,
struct nf_nat_info *info)
struct nf_conn *ct)
{
int ret;

@ -285,7 +223,7 @@ int nf_nat_rule_find(struct sk_buff **pskb,
if (ret == NF_ACCEPT) {
if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
/* NUL mapping */
ret = alloc_null_binding(ct, info, hooknum);
ret = alloc_null_binding(ct, hooknum);
}
return ret;
}
@ -80,7 +80,6 @@ nf_nat_fn(unsigned int hooknum,
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
struct nf_conn_nat *nat;
struct nf_nat_info *info;
/* maniptype == SRC for postrouting. */
enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

@ -129,7 +128,6 @@ nf_nat_fn(unsigned int hooknum,
}
/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
case IP_CT_NEW:
info = &nat->info;

/* Seen it before? This can happen for loopback, retrans,
or local packets.. */
@ -138,14 +136,13 @@ nf_nat_fn(unsigned int hooknum,

if (unlikely(nf_ct_is_confirmed(ct)))
/* NAT module was loaded late */
ret = alloc_null_binding_confirmed(ct, info,
hooknum);
ret = alloc_null_binding_confirmed(ct, hooknum);
else if (hooknum == NF_IP_LOCAL_IN)
/* LOCAL_IN hook doesn't have a chain! */
ret = alloc_null_binding(ct, info, hooknum);
ret = alloc_null_binding(ct, hooknum);
else
ret = nf_nat_rule_find(pskb, hooknum, in, out,
ct, info);
ct);

if (ret != NF_ACCEPT) {
return ret;
@ -160,10 +157,8 @@ nf_nat_fn(unsigned int hooknum,
/* ESTABLISHED */
NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
info = &nat->info;
}

NF_CT_ASSERT(info);
return nf_nat_packet(ct, ctinfo, hooknum, pskb);
}

@ -118,15 +118,15 @@ static int udp_port_rover;
 * Note about this hash function :
 * Typical use is probably daddr = 0, only dport is going to vary hash
 */
static inline unsigned int hash_port_and_addr(__u16 port, __be32 addr)
static inline unsigned int udp_hash_port(__u16 port)
{
addr ^= addr >> 16;
addr ^= addr >> 8;
return port ^ addr;
return port;
}

static inline int __udp_lib_port_inuse(unsigned int hash, int port,
__be32 daddr, struct hlist_head udptable[])
const struct sock *this_sk,
struct hlist_head udptable[],
const struct udp_get_port_ops *ops)
{
struct sock *sk;
struct hlist_node *node;
@ -138,7 +138,10 @@ static inline int __udp_lib_port_inuse(unsigned int hash, int port,
inet = inet_sk(sk);
if (inet->num != port)
continue;
if (inet->rcv_saddr == daddr)
if (this_sk) {
if (ops->saddr_cmp(sk, this_sk))
return 1;
} else if (ops->saddr_any(sk))
return 1;
}
return 0;
@ -151,12 +154,11 @@ static inline int __udp_lib_port_inuse(unsigned int hash, int port,
 * @snum: port number to look up
 * @udptable: hash list table, must be of UDP_HTABLE_SIZE
 * @port_rover: pointer to record of last unallocated port
 * @saddr_comp: AF-dependent comparison of bound local IP addresses
 * @ops: AF-dependent address operations
 */
int __udp_lib_get_port(struct sock *sk, unsigned short snum,
struct hlist_head udptable[], int *port_rover,
int (*saddr_comp)(const struct sock *sk1,
const struct sock *sk2 ) )
const struct udp_get_port_ops *ops)
{
struct hlist_node *node;
struct hlist_head *head;
@ -176,8 +178,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) {
int size;

hash = hash_port_and_addr(result,
inet_sk(sk)->rcv_saddr);
hash = ops->hash_port_and_rcv_saddr(result, sk);
head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];
if (hlist_empty(head)) {
if (result > sysctl_local_port_range[1])
@ -203,17 +204,16 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
result = sysctl_local_port_range[0]
+ ((result - sysctl_local_port_range[0]) &
(UDP_HTABLE_SIZE - 1));
hash = hash_port_and_addr(result, 0);
hash = udp_hash_port(result);
if (__udp_lib_port_inuse(hash, result,
0, udptable))
NULL, udptable, ops))
continue;
if (!inet_sk(sk)->rcv_saddr)
if (ops->saddr_any(sk))
break;

hash = hash_port_and_addr(result,
inet_sk(sk)->rcv_saddr);
hash = ops->hash_port_and_rcv_saddr(result, sk);
if (! __udp_lib_port_inuse(hash, result,
inet_sk(sk)->rcv_saddr, udptable))
sk, udptable, ops))
break;
}
if (i >= (1 << 16) / UDP_HTABLE_SIZE)
@ -221,7 +221,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
gotit:
*port_rover = snum = result;
} else {
hash = hash_port_and_addr(snum, 0);
hash = udp_hash_port(snum);
head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];

sk_for_each(sk2, node, head)
@ -231,12 +231,11 @@ gotit:
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
(*saddr_comp)(sk, sk2))
ops->saddr_cmp(sk, sk2))
goto fail;

if (inet_sk(sk)->rcv_saddr) {
hash = hash_port_and_addr(snum,
inet_sk(sk)->rcv_saddr);
if (!ops->saddr_any(sk)) {
hash = ops->hash_port_and_rcv_saddr(snum, sk);
head = &udptable[hash & (UDP_HTABLE_SIZE - 1)];

sk_for_each(sk2, node, head)
@ -248,7 +247,7 @@ gotit:
!sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if ==
sk->sk_bound_dev_if) &&
(*saddr_comp)(sk, sk2))
ops->saddr_cmp(sk, sk2))
goto fail;
}
}
@ -266,12 +265,12 @@ fail:
}

int udp_get_port(struct sock *sk, unsigned short snum,
int (*scmp)(const struct sock *, const struct sock *))
const struct udp_get_port_ops *ops)
{
return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, scmp);
return __udp_lib_get_port(sk, snum, udp_hash, &udp_port_rover, ops);
}

int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

@ -280,9 +279,33 @@ int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
inet1->rcv_saddr == inet2->rcv_saddr ));
}

static int ipv4_rcv_saddr_any(const struct sock *sk)
{
return !inet_sk(sk)->rcv_saddr;
}

static inline unsigned int ipv4_hash_port_and_addr(__u16 port, __be32 addr)
{
addr ^= addr >> 16;
addr ^= addr >> 8;
return port ^ addr;
}

static unsigned int ipv4_hash_port_and_rcv_saddr(__u16 port,
const struct sock *sk)
{
return ipv4_hash_port_and_addr(port, inet_sk(sk)->rcv_saddr);
}

const struct udp_get_port_ops udp_ipv4_ops = {
.saddr_cmp = ipv4_rcv_saddr_equal,
.saddr_any = ipv4_rcv_saddr_any,
.hash_port_and_rcv_saddr = ipv4_hash_port_and_rcv_saddr,
};

static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
return udp_get_port(sk, snum, ipv4_rcv_saddr_equal);
return udp_get_port(sk, snum, &udp_ipv4_ops);
}
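The AF-specific address handling is now routed through a udp_get_port_ops vtable (udp_ipv4_ops here, udp_ipv6_ops in the IPv6 hunks below), so __udp_lib_get_port() and __udp_lib_port_inuse() no longer touch an IPv4 rcv_saddr directly. The structure itself is declared elsewhere; judging purely from the three fields initialised in this diff, it is presumably something along these lines (a sketch, the real declaration may differ in detail):

/* Inferred from the .saddr_cmp / .saddr_any / .hash_port_and_rcv_saddr
 * initialisers in this commit; hypothetical layout. */
struct udp_get_port_ops {
	int		(*saddr_cmp)(const struct sock *sk1,
				     const struct sock *sk2);
	int		(*saddr_any)(const struct sock *sk);
	unsigned int	(*hash_port_and_rcv_saddr)(__u16 port,
						   const struct sock *sk);
};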

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
@ -297,8 +320,8 @@ static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
unsigned int hash, hashwild;
int score, best = -1, hport = ntohs(dport);

hash = hash_port_and_addr(hport, daddr);
hashwild = hash_port_and_addr(hport, 0);
hash = ipv4_hash_port_and_addr(hport, daddr);
hashwild = udp_hash_port(hport);

read_lock(&udp_hash_lock);

@ -1198,8 +1221,8 @@ static int __udp4_lib_mcast_deliver(struct sk_buff *skb,
struct sock *sk, *skw, *sknext;
int dif;
int hport = ntohs(uh->dest);
unsigned int hash = hash_port_and_addr(hport, daddr);
unsigned int hashwild = hash_port_and_addr(hport, 0);
unsigned int hash = ipv4_hash_port_and_addr(hport, daddr);
unsigned int hashwild = udp_hash_port(hport);

dif = skb->dev->ifindex;

@ -5,14 +5,14 @@
#include <net/protocol.h>
#include <net/inet_common.h>

extern const struct udp_get_port_ops udp_ipv4_ops;

extern int __udp4_lib_rcv(struct sk_buff *, struct hlist_head [], int );
extern void __udp4_lib_err(struct sk_buff *, u32, struct hlist_head []);

extern int __udp_lib_get_port(struct sock *sk, unsigned short snum,
struct hlist_head udptable[], int *port_rover,
int (*)(const struct sock*,const struct sock*));
extern int ipv4_rcv_saddr_equal(const struct sock *, const struct sock *);

const struct udp_get_port_ops *ops);

extern int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, int optlen);

@ -19,14 +19,15 @@ struct hlist_head udplite_hash[UDP_HTABLE_SIZE];
static int udplite_port_rover;

int udplite_get_port(struct sock *sk, unsigned short p,
int (*c)(const struct sock *, const struct sock *))
const struct udp_get_port_ops *ops)
{
return __udp_lib_get_port(sk, p, udplite_hash, &udplite_port_rover, c);
return __udp_lib_get_port(sk, p, udplite_hash,
&udplite_port_rover, ops);
}

static int udplite_v4_get_port(struct sock *sk, unsigned short snum)
{
return udplite_get_port(sk, snum, ipv4_rcv_saddr_equal);
return udplite_get_port(sk, snum, &udp_ipv4_ops);
}

static int udplite_rcv(struct sk_buff *skb)

@ -4204,6 +4204,10 @@ int __init addrconf_init(void)
return err;

ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
ip6_prohibit_entry.rt6i_idev = in6_dev_get(&loopback_dev);
ip6_blk_hole_entry.rt6i_idev = in6_dev_get(&loopback_dev);
#endif

register_netdevice_notifier(&ipv6_dev_notf);

@ -660,6 +660,14 @@ EXPORT_SYMBOL_GPL(ipv6_invert_rthdr);
  Hop-by-hop options.
 **********************************/

/*
 * Note: we cannot rely on skb->dst before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
return skb->dst ? ip6_dst_idev(skb->dst) : __in6_dev_get(skb->dev);
}

/* Router Alert as of RFC 2711 */

static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
@ -688,25 +696,25 @@ static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff)
if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
nh[optoff+1]);
IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
IP6_INC_STATS_BH(ipv6_skb_idev(skb),
IPSTATS_MIB_INHDRERRORS);
goto drop;
}

pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
if (pkt_len <= IPV6_MAXPLEN) {
IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
return 0;
}
if (ipv6_hdr(skb)->payload_len) {
IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
return 0;
}

if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INTRUNCATEDPKTS);
IP6_INC_STATS_BH(ipv6_skb_idev(skb), IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
}

@ -463,10 +463,17 @@ int ip6_forward(struct sk_buff *skb)
 */
if (xrlim_allow(dst, 1*HZ))
ndisc_send_redirect(skb, n, target);
} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
|IPV6_ADDR_LINKLOCAL)) {
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);

/* This check is security critical. */
goto error;
if (addrtype & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK))
goto error;
if (addrtype & IPV6_ADDR_LINKLOCAL) {
icmpv6_send(skb, ICMPV6_DEST_UNREACH,
ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
goto error;
}
}

if (skb->len > dst_mtu(dst)) {

@ -24,53 +24,29 @@ static struct
struct ip6t_replace repl;
struct ip6t_standard entries[3];
struct ip6t_error term;
} initial_table __initdata
= { { "filter", FILTER_VALID_HOOKS, 4,
sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
{ [NF_IP6_LOCAL_IN] = 0,
[NF_IP6_FORWARD] = sizeof(struct ip6t_standard),
[NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 },
{ [NF_IP6_LOCAL_IN] = 0,
[NF_IP6_FORWARD] = sizeof(struct ip6t_standard),
[NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2 },
0, NULL, { } },
{
/* LOCAL_IN */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_standard),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* FORWARD */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_standard),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* LOCAL_OUT */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_standard),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } }
},
/* ERROR */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_error),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_error_target)), IP6T_ERROR_TARGET } },
{ } },
"ERROR"
}
}
} initial_table __initdata = {
.repl = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.num_entries = 4,
.size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
.hook_entry = {
[NF_IP6_LOCAL_IN] = 0,
[NF_IP6_FORWARD] = sizeof(struct ip6t_standard),
[NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
},
.underflow = {
[NF_IP6_LOCAL_IN] = 0,
[NF_IP6_FORWARD] = sizeof(struct ip6t_standard),
[NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
},
},
.entries = {
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
},
.term = IP6T_ERROR_INIT, /* ERROR */
};

static struct xt_table packet_filter = {

@ -32,73 +32,35 @@ static struct
struct ip6t_replace repl;
struct ip6t_standard entries[5];
struct ip6t_error term;
} initial_table __initdata
= { { "mangle", MANGLE_VALID_HOOKS, 6,
sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
{ [NF_IP6_PRE_ROUTING] = 0,
[NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard),
[NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2,
[NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
[NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4},
{ [NF_IP6_PRE_ROUTING] = 0,
[NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard),
[NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2,
[NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
[NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4},
0, NULL, { } },
{
/* PRE_ROUTING */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_standard),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* LOCAL_IN */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_standard),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* FORWARD */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_standard),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* LOCAL_OUT */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_standard),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } },
/* POST_ROUTING */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_standard),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_standard_target)), "" } }, { } },
-NF_ACCEPT - 1 } }
},
/* ERROR */
{ { { { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, { { { 0 } } }, "", "", { 0 }, { 0 }, 0, 0, 0 },
0,
sizeof(struct ip6t_entry),
sizeof(struct ip6t_error),
0, { 0, 0 }, { } },
{ { { { IP6T_ALIGN(sizeof(struct ip6t_error_target)), IP6T_ERROR_TARGET } },
{ } },
"ERROR"
}
}
} initial_table __initdata = {
.repl = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.num_entries = 6,
.size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
.hook_entry = {
[NF_IP6_PRE_ROUTING] = 0,
[NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard),
[NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2,
[NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
[NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
},
.underflow = {
[NF_IP6_PRE_ROUTING] = 0,
[NF_IP6_LOCAL_IN] = sizeof(struct ip6t_standard),
[NF_IP6_FORWARD] = sizeof(struct ip6t_standard) * 2,
[NF_IP6_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
[NF_IP6_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
},
},
.entries = {
IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
},
.term = IP6T_ERROR_INIT, /* ERROR */
};

static struct xt_table packet_mangler = {

@ -35,56 +35,10 @@ static struct
},
},
.entries = {
/* PRE_ROUTING */
{
.entry = {
.target_offset = sizeof(struct ip6t_entry),
.next_offset = sizeof(struct ip6t_standard),
},
.target = {
.target = {
.u = {
.target_size = IP6T_ALIGN(sizeof(struct ip6t_standard_target)),
},
},
.verdict = -NF_ACCEPT - 1,
},
},

/* LOCAL_OUT */
{
.entry = {
.target_offset = sizeof(struct ip6t_entry),
.next_offset = sizeof(struct ip6t_standard),
},
.target = {
.target = {
.u = {
.target_size = IP6T_ALIGN(sizeof(struct ip6t_standard_target)),
},
},
.verdict = -NF_ACCEPT - 1,
},
},
IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
},
/* ERROR */
.term = {
.entry = {
.target_offset = sizeof(struct ip6t_entry),
.next_offset = sizeof(struct ip6t_error),
},
.target = {
.target = {
.u = {
.user = {
.target_size = IP6T_ALIGN(sizeof(struct ip6t_error_target)),
.name = IP6T_ERROR_TARGET,
},
},
},
.errorname = "ERROR",
},
}
.term = IP6T_ERROR_INIT, /* ERROR */
};

static struct xt_table packet_raw = {

@ -52,9 +52,28 @@

DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;

static int ipv6_rcv_saddr_any(const struct sock *sk)
{
struct ipv6_pinfo *np = inet6_sk(sk);

return ipv6_addr_any(&np->rcv_saddr);
}

static unsigned int ipv6_hash_port_and_rcv_saddr(__u16 port,
const struct sock *sk)
{
return port;
}

const struct udp_get_port_ops udp_ipv6_ops = {
.saddr_cmp = ipv6_rcv_saddr_equal,
.saddr_any = ipv6_rcv_saddr_any,
.hash_port_and_rcv_saddr = ipv6_hash_port_and_rcv_saddr,
};

static inline int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
return udp_get_port(sk, snum, ipv6_rcv_saddr_equal);
return udp_get_port(sk, snum, &udp_ipv6_ops);
}

static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,

@ -6,6 +6,8 @@
#include <net/addrconf.h>
#include <net/inet_common.h>

extern const struct udp_get_port_ops udp_ipv6_ops;

extern int __udp6_lib_rcv(struct sk_buff **, struct hlist_head [], int );
extern void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
int , int , int , __be32 , struct hlist_head []);

@ -37,7 +37,7 @@ static struct inet6_protocol udplitev6_protocol = {

static int udplite_v6_get_port(struct sock *sk, unsigned short snum)
{
return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal);
return udplite_get_port(sk, snum, &udp_ipv6_ops);
}

struct proto udplitev6_prot = {

@ -17,6 +17,7 @@
 * scan result table filtering (by capability (privacy, IBSS/BSS, WPA/RSN IE,
 * SSID)
 */
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@ -27,7 +28,6 @@
#include <linux/rtnetlink.h>
#include <net/iw_handler.h>
#include <asm/types.h>
#include <asm/delay.h>

#include <net/mac80211.h>
#include "ieee80211_i.h"

@ -299,7 +299,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
struct nf_conn_help *help = nfct_help(ct);
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
typeof(nf_conntrack_destroyed) destroyed;

@ -317,10 +316,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
 * destroy_conntrack() MUST NOT be called with a write lock
 * to nf_conntrack_lock!!! -HW */
rcu_read_lock();
l3proto = __nf_ct_l3proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
if (l3proto && l3proto->destroy)
l3proto->destroy(ct);

l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
if (l4proto && l4proto->destroy)
@ -893,8 +888,13 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
NF_CT_DUMP_TUPLE(newreply);

ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
if (!ct->master && help && help->expecting == 0)
help->helper = __nf_ct_helper_find(newreply);
if (!ct->master && help && help->expecting == 0) {
struct nf_conntrack_helper *helper;
helper = __nf_ct_helper_find(newreply);
if (helper)
memset(&help->help, 0, sizeof(help->help));
help->helper = helper;
}
write_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

@ -830,11 +830,6 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[])
char *helpname;
int err;

if (!help) {
/* FIXME: we need to reallocate and rehash */
return -EBUSY;
}

/* don't change helper of sibling connections */
if (ct->master)
return -EINVAL;
@ -843,25 +838,34 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nfattr *cda[])
if (err < 0)
return err;

helper = __nf_conntrack_helper_find_byname(helpname);
if (!helper) {
if (!strcmp(helpname, ""))
helper = NULL;
else
return -EINVAL;
}

if (help->helper) {
if (!helper) {
if (!strcmp(helpname, "")) {
if (help && help->helper) {
/* we had a helper before ... */
nf_ct_remove_expectations(ct);
help->helper = NULL;
} else {
/* need to zero data of old helper */
memset(&help->help, 0, sizeof(help->help));
}

return 0;
}

if (!help) {
/* FIXME: we need to reallocate and rehash */
return -EBUSY;
}

helper = __nf_conntrack_helper_find_byname(helpname);
if (helper == NULL)
return -EINVAL;

if (help->helper == helper)
return 0;

if (help->helper)
/* we had a helper before ... */
nf_ct_remove_expectations(ct);

/* need to zero data of old helper */
memset(&help->help, 0, sizeof(help->help));
help->helper = helper;

return 0;

@ -134,12 +134,66 @@ static void destroy(const struct xt_match *match, void *matchinfo)
nf_ct_l3proto_module_put(match->family);
}

#ifdef CONFIG_COMPAT
struct compat_xt_conntrack_info
{
compat_uint_t statemask;
compat_uint_t statusmask;
struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX];
struct in_addr sipmsk[IP_CT_DIR_MAX];
struct in_addr dipmsk[IP_CT_DIR_MAX];
compat_ulong_t expires_min;
compat_ulong_t expires_max;
u_int8_t flags;
u_int8_t invflags;
};

static void compat_from_user(void *dst, void *src)
{
struct compat_xt_conntrack_info *cm = src;
struct xt_conntrack_info m = {
.statemask = cm->statemask,
.statusmask = cm->statusmask,
.expires_min = cm->expires_min,
.expires_max = cm->expires_max,
.flags = cm->flags,
.invflags = cm->invflags,
};
memcpy(m.tuple, cm->tuple, sizeof(m.tuple));
memcpy(m.sipmsk, cm->sipmsk, sizeof(m.sipmsk));
memcpy(m.dipmsk, cm->dipmsk, sizeof(m.dipmsk));
memcpy(dst, &m, sizeof(m));
}

static int compat_to_user(void __user *dst, void *src)
{
struct xt_conntrack_info *m = src;
struct compat_xt_conntrack_info cm = {
.statemask = m->statemask,
.statusmask = m->statusmask,
.expires_min = m->expires_min,
.expires_max = m->expires_max,
.flags = m->flags,
.invflags = m->invflags,
};
memcpy(cm.tuple, m->tuple, sizeof(cm.tuple));
memcpy(cm.sipmsk, m->sipmsk, sizeof(cm.sipmsk));
memcpy(cm.dipmsk, m->dipmsk, sizeof(cm.dipmsk));
return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
}
#endif

static struct xt_match conntrack_match = {
.name = "conntrack",
.match = match,
.checkentry = checkentry,
.destroy = destroy,
.matchsize = sizeof(struct xt_conntrack_info),
#ifdef CONFIG_COMPAT
.compatsize = sizeof(struct compat_xt_conntrack_info),
.compat_from_user = compat_from_user,
.compat_to_user = compat_to_user,
#endif
.family = AF_INET,
.me = THIS_MODULE,
};

@ -71,12 +71,9 @@ void qdisc_unlock_tree(struct net_device *dev)


/* Kick device.
   Note, that this procedure can be called by a watchdog timer, so that
   we do not check dev->tbusy flag here.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but throttled.
            <0  - queue is not empty. Device is throttled, if dev->tbusy != 0.
   Returns:  0  - queue is empty or throttled.
            >0  - queue is not empty.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/
@ -115,7 +112,7 @@ static inline int qdisc_restart(struct net_device *dev)
kfree_skb(skb);
if (net_ratelimit())
printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
return -1;
goto out;
}
__get_cpu_var(netdev_rx_stat).cpu_collision++;
goto requeue;
@ -135,10 +132,12 @@ static inline int qdisc_restart(struct net_device *dev)
netif_tx_unlock(dev);
}
spin_lock(&dev->queue_lock);
return -1;
q = dev->qdisc;
goto out;
}
if (ret == NETDEV_TX_LOCKED && nolock) {
spin_lock(&dev->queue_lock);
q = dev->qdisc;
goto collision;
}
}
@ -163,26 +162,28 @@ static inline int qdisc_restart(struct net_device *dev)
 */

requeue:
if (skb->next)
if (unlikely(q == &noop_qdisc))
kfree_skb(skb);
else if (skb->next)
dev->gso_skb = skb;
else
q->ops->requeue(skb, q);
netif_schedule(dev);
return 1;
return 0;
}

out:
BUG_ON((int) q->q.qlen < 0);
return q->q.qlen;
}

void __qdisc_run(struct net_device *dev)
{
if (unlikely(dev->qdisc == &noop_qdisc))
goto out;
do {
if (!qdisc_restart(dev))
break;
} while (!netif_queue_stopped(dev));

while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
/* NOTHING */;

out:
clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}

@ -544,6 +545,7 @@ void dev_activate(struct net_device *dev)
void dev_deactivate(struct net_device *dev)
{
struct Qdisc *qdisc;
struct sk_buff *skb;

spin_lock_bh(&dev->queue_lock);
qdisc = dev->qdisc;
@ -551,8 +553,12 @@ void dev_deactivate(struct net_device *dev)

qdisc_reset(qdisc);

skb = dev->gso_skb;
dev->gso_skb = NULL;
spin_unlock_bh(&dev->queue_lock);

kfree_skb(skb);

dev_watchdog_down(dev);

/* Wait for outstanding dev_queue_xmit calls. */
@ -561,11 +567,6 @@ void dev_deactivate(struct net_device *dev)
/* Wait for outstanding qdisc_run calls. */
while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
yield();

if (dev->gso_skb) {
kfree_skb(dev->gso_skb);
dev->gso_skb = NULL;
}
}

void dev_init_scheduler(struct net_device *dev)

@ -94,14 +94,13 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
struct net_device *dev = sch->dev;
struct teql_sched_data *q = qdisc_priv(sch);

__skb_queue_tail(&q->q, skb);
if (q->q.qlen <= dev->tx_queue_len) {
if (q->q.qlen < dev->tx_queue_len) {
__skb_queue_tail(&q->q, skb);
sch->bstats.bytes += skb->len;
sch->bstats.packets++;
return 0;
}

__skb_unlink(skb, &q->q);
kfree_skb(skb);
sch->qstats.drops++;
return NET_XMIT_DROP;

@ -4164,6 +4164,7 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
rwlock_t *addr_lock;
int err = 0;
void *addrs;
void *buf;
int bytes_copied = 0;

if (len != sizeof(struct sctp_getaddrs_old))
@ -4217,13 +4218,14 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
}
}

buf = addrs;
list_for_each(pos, &bp->address_list) {
addr = list_entry(pos, struct sctp_sockaddr_entry, list);
memcpy(&temp, &addr->a, sizeof(temp));
sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
memcpy(addrs, &temp, addrlen);
to += addrlen;
memcpy(buf, &temp, addrlen);
buf += addrlen;
bytes_copied += addrlen;
cnt ++;
if (cnt >= getaddrs.addr_num) break;
@ -4266,6 +4268,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
size_t space_left;
int bytes_copied = 0;
void *addrs;
void *buf;

if (len <= sizeof(struct sctp_getaddrs))
return -EINVAL;
@ -4316,6 +4319,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
}
}

buf = addrs;
list_for_each(pos, &bp->address_list) {
addr = list_entry(pos, struct sctp_sockaddr_entry, list);
memcpy(&temp, &addr->a, sizeof(temp));
@ -4325,8 +4329,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
err = -ENOMEM; /*fixme: right error?*/
goto error;
}
memcpy(addrs, &temp, addrlen);
to += addrlen;
memcpy(buf, &temp, addrlen);
buf += addrlen;
bytes_copied += addrlen;
cnt ++;
space_left -= addrlen;
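The sctp_copy_laddrs hunks above fix the local-address copy by advancing a separate cursor (buf) while addrs keeps pointing at the start of the kernel buffer, so the later copy_to_user() copies from the base rather than from an already-advanced pointer. Schematically (a hedged sketch following the variable names in the hunk, error handling omitted):

/* Cursor walks the buffer, base pointer stays put. */
void *buf = addrs;
int bytes_copied = 0;

list_for_each(pos, &bp->address_list) {
	addr = list_entry(pos, struct sctp_sockaddr_entry, list);
	memcpy(&temp, &addr->a, sizeof(temp));
	addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
	memcpy(buf, &temp, addrlen);
	buf += addrlen;
	bytes_copied += addrlen;
}
/* ... and the user copy then starts from 'addrs': */
if (copy_to_user(to, addrs, bytes_copied))
	err = -EFAULT;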
@ -5227,7 +5231,12 @@ int sctp_inet_listen(struct socket *sock, int backlog)
/* Allocate HMAC for generating cookie. */
if (sctp_hmac_alg) {
tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
if (!tfm) {
if (IS_ERR(tfm)) {
if (net_ratelimit()) {
printk(KERN_INFO
"SCTP: failed to load transform for %s: %ld\n",
sctp_hmac_alg, PTR_ERR(tfm));
}
err = -ENOSYS;
goto out;
}

@ -141,11 +141,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
 * an ABORT, so we need to include it in the sac_info.
 */
if (chunk) {
/* sctp_inqu_pop() has allready pulled off the chunk
 * header. We need to put it back temporarily
 */
skb_push(chunk->skb, sizeof(sctp_chunkhdr_t));

/* Copy the chunk data to a new skb and reserve enough
 * head room to use as notification.
 */
@ -155,9 +150,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
if (!skb)
goto fail;

/* put back the chunk header now that we have a copy */
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));

/* Embed the event fields inside the cloned skb. */
event = sctp_skb2event(skb);
sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
@ -168,7 +160,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(

/* Trim the buffer to the right length. */
skb_trim(skb, sizeof(struct sctp_assoc_change) +
ntohs(chunk->chunk_hdr->length));
ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
} else {
event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change),
MSG_NOTIFICATION, gfp);
