/********************************************************************
 Filename:      via-ircc.c
 Version:       1.0
 Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
 Author:        VIA Technologies,inc
 Date  :        08/06/2003

Copyright (c) 1998-2003 VIA Technologies, Inc.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

F01 Oct/02/02: Modify code for V0.11 (move out back-to-back transfer)
F02 Oct/28/02: Add SB device ID for 3147 and 3177.
 Comment:
       jul/09/2002 : only implement two kinds of dongle currently.
       Oct/02/2002 : work on VT8231 and VT8233 .
       Aug/06/2003 : change driver format to pci driver .

2004-02-16: <sda@bdit.de>
- Removed unneeded 'legacy' pci stuff.
- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
- On speed change from core, don't send SIR frame with new speed.
  Use current speed and change speeds later.
- Make module-param dongle_id actually work.
- New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only.
  Tested with home-grown PCB on EPIA boards.
- Code cleanup.

 ********************************************************************/
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/init.h>
|
2011-06-06 10:43:46 +00:00
|
|
|
#include <linux/interrupt.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/rtnetlink.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/dma-mapping.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 08:04:11 +00:00
|
|
|
#include <linux/gfp.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/dma.h>
|
|
|
|
#include <asm/byteorder.h>
|
|
|
|
|
|
|
|
#include <linux/pm.h>
|
|
|
|
|
|
|
|
#include <net/irda/wrapper.h>
|
|
|
|
#include <net/irda/irda.h>
|
|
|
|
#include <net/irda/irda_device.h>
|
|
|
|
|
|
|
|
#include "via-ircc.h"
|
|
|
|
|
|
|
|
#define VIA_MODULE_NAME "via-ircc"
|
|
|
|
#define CHIP_IO_EXTENT 0x40
|
|
|
|
|
|
|
|
static char *driver_name = VIA_MODULE_NAME;
|
|
|
|
|
|
|
|
/* Module parameters */
|
|
|
|
static int qos_mtt_bits = 0x07; /* 1 ms or more */
|
|
|
|
static int dongle_id = 0; /* default: probe */
|
|
|
|
|
|
|
|
/* We can't guess the type of connected dongle, user *must* supply it. */
|
|
|
|
module_param(dongle_id, int, 0);
|
|
|
|
|
|
|
|
/* Some prototypes */
|
2012-12-06 14:30:56 +00:00
|
|
|
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
|
2011-03-28 17:10:43 +00:00
|
|
|
unsigned int id);
|
2005-04-16 22:20:36 +00:00
|
|
|
static int via_ircc_dma_receive(struct via_ircc_cb *self);
|
|
|
|
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
|
|
|
|
int iobase);
|
2009-08-31 19:50:50 +00:00
|
|
|
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
|
|
|
|
struct net_device *dev);
|
|
|
|
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
|
|
|
|
struct net_device *dev);
|
2005-04-16 22:20:36 +00:00
|
|
|
static void via_hw_init(struct via_ircc_cb *self);
|
|
|
|
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 13:55:46 +00:00
|
|
|
static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
|
2005-04-16 22:20:36 +00:00
|
|
|
static int via_ircc_is_receiving(struct via_ircc_cb *self);
|
|
|
|
static int via_ircc_read_dongle_id(int iobase);
|
|
|
|
|
|
|
|
static int via_ircc_net_open(struct net_device *dev);
|
|
|
|
static int via_ircc_net_close(struct net_device *dev);
|
|
|
|
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
|
|
|
|
int cmd);
|
|
|
|
static void via_ircc_change_dongle_speed(int iobase, int speed,
|
|
|
|
int dongle_id);
|
|
|
|
static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
|
|
|
|
static void hwreset(struct via_ircc_cb *self);
|
|
|
|
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
|
|
|
|
static int upload_rxdata(struct via_ircc_cb *self, int iobase);
|
2012-12-03 14:24:13 +00:00
|
|
|
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
|
2012-12-06 14:30:56 +00:00
|
|
|
static void via_remove_one(struct pci_dev *pdev);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
|
|
|
|
/*
 * Crude busy-wait of roughly 'udelay' microseconds: each dummy read of
 * I/O port 0x80 (the POST diagnostic port) takes about 1us on a PC.
 * The value read is deliberately discarded - the bus access itself is
 * the delay.  (The previous version stored the result in a local that
 * was never used, triggering set-but-unused warnings.)
 */
static void iodelay(int udelay)
{
	int i;

	for (i = 0; i < udelay; i++)
		inb(0x80);
}
|
|
|
|
|
2010-01-07 11:58:11 +00:00
|
|
|
static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
|
2005-04-16 22:20:36 +00:00
|
|
|
{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
|
|
|
|
{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
|
|
|
|
{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
|
|
|
|
{ PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
|
|
|
|
{ PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
|
|
|
|
{ 0, }
|
|
|
|
};
|
|
|
|
|
|
|
|
MODULE_DEVICE_TABLE(pci,via_pci_tbl);
|
|
|
|
|
|
|
|
|
|
|
|
/* PCI driver glue: binds via_init_one()/via_remove_one() to the device
 * IDs listed in via_pci_tbl. */
static struct pci_driver via_driver = {
	.name		= VIA_MODULE_NAME,
	.id_table	= via_pci_tbl,
	.probe		= via_init_one,
	.remove		= via_remove_one,
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_init ()
|
|
|
|
*
|
|
|
|
* Initialize chip. Just find out chip type and resource.
|
|
|
|
*/
|
|
|
|
/*
 * Function via_ircc_init ()
 *
 *    Module init: register the PCI driver; probing of individual
 *    devices happens via via_init_one().
 *
 *    Returns 0 on success or the negative error from
 *    pci_register_driver().  (Previously a blanket -ENODEV was
 *    returned, hiding the real cause of the failure.)
 */
static int __init via_ircc_init(void)
{
	int rc;

	IRDA_DEBUG(3, "%s()\n", __func__);

	rc = pci_register_driver(&via_driver);
	if (rc < 0) {
		IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
		return rc;	/* propagate the real error code */
	}
	return 0;
}
|
|
|
|
|
2012-12-03 14:24:13 +00:00
|
|
|
static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
|
|
|
int rc;
|
|
|
|
u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
|
|
|
|
u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
|
|
|
|
chipio_t info;
|
|
|
|
|
2008-07-31 00:20:18 +00:00
|
|
|
IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
rc = pci_enable_device (pcidev);
|
|
|
|
if (rc) {
|
2008-07-31 00:20:18 +00:00
|
|
|
IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
|
2005-04-16 22:20:36 +00:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
// South Bridge exist
|
|
|
|
if ( ReadLPCReg(0x20) != 0x3C )
|
|
|
|
Chipset=0x3096;
|
|
|
|
else
|
|
|
|
Chipset=0x3076;
|
|
|
|
|
|
|
|
if (Chipset==0x3076) {
|
2008-07-31 00:20:18 +00:00
|
|
|
IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
WriteLPCReg(7,0x0c );
|
|
|
|
temp=ReadLPCReg(0x30);//check if BIOS Enable Fir
|
|
|
|
if((temp&0x01)==1) { // BIOS close or no FIR
|
|
|
|
WriteLPCReg(0x1d, 0x82 );
|
|
|
|
WriteLPCReg(0x23,0x18);
|
|
|
|
temp=ReadLPCReg(0xF0);
|
|
|
|
if((temp&0x01)==0) {
|
|
|
|
temp=(ReadLPCReg(0x74)&0x03); //DMA
|
|
|
|
FirDRQ0=temp + 4;
|
|
|
|
temp=(ReadLPCReg(0x74)&0x0C) >> 2;
|
|
|
|
FirDRQ1=temp + 4;
|
|
|
|
} else {
|
|
|
|
temp=(ReadLPCReg(0x74)&0x0C) >> 2; //DMA
|
|
|
|
FirDRQ0=temp + 4;
|
|
|
|
FirDRQ1=FirDRQ0;
|
|
|
|
}
|
|
|
|
FirIRQ=(ReadLPCReg(0x70)&0x0f); //IRQ
|
|
|
|
FirIOBase=ReadLPCReg(0x60 ) << 8; //IO Space :high byte
|
|
|
|
FirIOBase=FirIOBase| ReadLPCReg(0x61) ; //low byte
|
|
|
|
FirIOBase=FirIOBase ;
|
|
|
|
info.fir_base=FirIOBase;
|
|
|
|
info.irq=FirIRQ;
|
|
|
|
info.dma=FirDRQ1;
|
|
|
|
info.dma2=FirDRQ0;
|
|
|
|
pci_read_config_byte(pcidev,0x40,&bTmp);
|
|
|
|
pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
|
|
|
|
pci_read_config_byte(pcidev,0x42,&bTmp);
|
|
|
|
pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
|
|
|
|
pci_write_config_byte(pcidev,0x5a,0xc0);
|
|
|
|
WriteLPCReg(0x28, 0x70 );
|
2013-08-16 20:48:14 +00:00
|
|
|
rc = via_ircc_open(pcidev, &info, 0x3076);
|
2005-04-16 22:20:36 +00:00
|
|
|
} else
|
|
|
|
rc = -ENODEV; //IR not turn on
|
|
|
|
} else { //Not VT1211
|
2008-07-31 00:20:18 +00:00
|
|
|
IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
pci_read_config_byte(pcidev,0x67,&bTmp);//check if BIOS Enable Fir
|
|
|
|
if((bTmp&0x01)==1) { // BIOS enable FIR
|
|
|
|
//Enable Double DMA clock
|
|
|
|
pci_read_config_byte(pcidev,0x42,&oldPCI_40);
|
|
|
|
pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
|
|
|
|
pci_read_config_byte(pcidev,0x40,&oldPCI_40);
|
|
|
|
pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
|
|
|
|
pci_read_config_byte(pcidev,0x44,&oldPCI_44);
|
|
|
|
pci_write_config_byte(pcidev,0x44,0x4e);
|
|
|
|
//---------- read configuration from Function0 of south bridge
|
|
|
|
if((bTmp&0x02)==0) {
|
|
|
|
pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
|
|
|
|
FirDRQ0 = (bTmp1 & 0x30) >> 4;
|
|
|
|
pci_read_config_byte(pcidev,0x44,&bTmp1);
|
|
|
|
FirDRQ1 = (bTmp1 & 0xc0) >> 6;
|
|
|
|
} else {
|
|
|
|
pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
|
|
|
|
FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
|
|
|
|
FirDRQ1=0;
|
|
|
|
}
|
|
|
|
pci_read_config_byte(pcidev,0x47,&bTmp1); //IRQ
|
|
|
|
FirIRQ = bTmp1 & 0x0f;
|
|
|
|
|
|
|
|
pci_read_config_byte(pcidev,0x69,&bTmp);
|
|
|
|
FirIOBase = bTmp << 8;//hight byte
|
|
|
|
pci_read_config_byte(pcidev,0x68,&bTmp);
|
|
|
|
FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
|
|
|
|
//-------------------------
|
|
|
|
info.fir_base=FirIOBase;
|
|
|
|
info.irq=FirIRQ;
|
|
|
|
info.dma=FirDRQ1;
|
|
|
|
info.dma2=FirDRQ0;
|
2013-08-16 20:48:14 +00:00
|
|
|
rc = via_ircc_open(pcidev, &info, 0x3096);
|
2005-04-16 22:20:36 +00:00
|
|
|
} else
|
|
|
|
rc = -ENODEV; //IR not turn on !!!!!
|
|
|
|
}//Not VT1211
|
|
|
|
|
2008-07-31 00:20:18 +00:00
|
|
|
IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
|
2005-04-16 22:20:36 +00:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Module exit: unregister the PCI driver; the PCI core then calls
 * via_remove_one() for every device still bound to it. */
static void __exit via_ircc_cleanup(void)
{
	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Cleanup all instances of the driver */
	pci_unregister_driver (&via_driver);
}
|
|
|
|
|
2009-03-20 19:35:43 +00:00
|
|
|
/* netdev ops used while the device runs at SIR speeds; only
 * .ndo_start_xmit differs from the FIR table below. */
static const struct net_device_ops via_ircc_sir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_sir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};

/* netdev ops used at FIR speeds; the speed-change code swaps
 * dev->netdev_ops between these two tables. */
static const struct net_device_ops via_ircc_fir_ops = {
	.ndo_start_xmit = via_ircc_hard_xmit_fir,
	.ndo_open = via_ircc_net_open,
	.ndo_stop = via_ircc_net_close,
	.ndo_do_ioctl = via_ircc_net_ioctl,
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2011-03-28 17:10:43 +00:00
|
|
|
* Function via_ircc_open(pdev, iobase, irq)
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* Open driver instance
|
|
|
|
*
|
|
|
|
*/
|
2012-12-06 14:30:56 +00:00
|
|
|
/*
 * Function via_ircc_open (pdev, info, id)
 *
 *    Open driver instance: allocate the IrDA net_device, reserve the
 *    I/O region, set up QoS from the dongle type, allocate coherent
 *    DMA rx/tx buffers, register the netdev and bring the hardware
 *    into its initial state (SIR, 9600 baud) via via_hw_init().
 *
 *    Returns 0 on success or a negative errno; on failure everything
 *    acquired so far is released again (goto-cleanup chain below).
 */
static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
{
	struct net_device *dev;
	struct via_ircc_cb *self;
	int err;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Allocate new instance of the driver */
	dev = alloc_irdadev(sizeof(struct via_ircc_cb));
	if (dev == NULL)
		return -ENOMEM;

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);

	pci_set_drvdata(pdev, self);

	/* Initialize Resource */
	self->io.cfg_base = info->cfg_base;
	self->io.fir_base = info->fir_base;
	self->io.irq = info->irq;
	self->io.fir_ext = CHIP_IO_EXTENT;
	self->io.dma = info->dma;
	self->io.dma2 = info->dma2;
	self->io.fifo_size = 32;
	self->chip_id = id;
	self->st_fifo.len = 0;
	self->RxDataReady = 0;

	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* Check if user has supplied the dongle id or not.  Decide
	 * per-device instead of writing the probed value back into the
	 * module parameter, so one device's probe result no longer
	 * leaks into devices probed later. */
	if (dongle_id)
		self->io.dongle_id = dongle_id;
	else
		self->io.dongle_id = via_ircc_read_dongle_id(self->io.fir_base);

	/* The only value we must override it the baudrate */
	/* Maximum speeds and capabilities are dongle-dependent. */
	switch( self->io.dongle_id ){
	case 0x0d:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
		    IR_576000 | IR_1152000 | (IR_4000000 << 8);
		break;
	default:
		self->qos.baud_rate.bits =
		    IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
		break;
	}

	/* Following was used for testing:
	 *
	 *	self->qos.baud_rate.bits = IR_9600;
	 *
	 * Is is no good, as it prohibits (error-prone) speed-changes.
	 */

	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384 + 2048;
	self->tx_buff.truesize = 14384 + 2048;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}

	self->tx_buff.head =
		dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;

	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	dev->netdev_ops = &via_ircc_sir_ops;

	err = register_netdev(dev);
	if (err)
		goto err_out4;

	IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);

	/* Initialise the hardware: start in SIR mode at 9600 baud so the
	 * mode-dependent code has a defined state to work from. */
	self->io.speed = 9600;
	via_hw_init(self);
	return 0;
 err_out4:
	dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return err;
}
|
|
|
|
|
|
|
|
/*
|
2011-03-28 17:10:43 +00:00
|
|
|
* Function via_remove_one(pdev)
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* Close driver instance
|
|
|
|
*
|
|
|
|
*/
|
2012-12-03 14:24:13 +00:00
|
|
|
static void via_remove_one(struct pci_dev *pdev)
{
	struct via_ircc_cb *self = pci_get_drvdata(pdev);
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;

	/* Full hardware reset before tearing anything down. */
	ResetChip(iobase, 5);
	/* Remove netdevice - no more traffic can reach us after this. */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);
	/* Buffers may be NULL if via_ircc_open() failed part-way. */
	if (self->tx_buff.head)
		dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);
	if (self->rx_buff.head)
		dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);
	pci_set_drvdata(pdev, NULL);

	/* Frees 'self' too (it lives in the netdev's private area). */
	free_netdev(self->netdev);

	pci_disable_device(pdev);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_hw_init(self)
|
|
|
|
*
|
|
|
|
* Returns non-negative on success.
|
|
|
|
*
|
|
|
|
* Formerly via_ircc_setup
|
|
|
|
*/
|
|
|
|
/*
 * Function via_hw_init(self)
 *
 *    Bring the chip into a known initial state: FIFO interrupts
 *    masked (except TX underrun/EOM), chip reset, SIR mode at
 *    9600 baud, dongle informed of the initial speed, IR enabled.
 *
 *    Formerly via_ircc_setup.
 */
static void via_hw_init(struct via_ircc_cb *self)
{
	int iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	SetMaxRxPacketSize(iobase, 0x0fff);	/* set to max: 4095 */
	/* FIFO Init: mask all FIFO interrupts except TX underrun/EOM */
	EnRXFIFOReadyInt(iobase, OFF);
	EnRXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOHalfLevelInt(iobase, OFF);
	EnTXFIFOUnderrunEOMInt(iobase, ON);
	EnTXFIFOReadyInt(iobase, OFF);
	InvertTX(iobase, OFF);
	InvertRX(iobase, OFF);

	if (ReadLPCReg(0x20) == 0x3c)
		WriteLPCReg(0xF0, 0);	/* for VT1211 */
	/* Int Init */
	EnRXSpecInt(iobase, ON);

	/* The following is basically hwreset */
	/* If this is the case, why not just call hwreset() ? Jean II */
	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	/* Start out in SIR mode; mode-dependent code (e.g. the dongle
	 * speed change below) relies on this being set first. */
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);	/* disable IR while configuring */
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);

	self->io.speed = 9600;
	self->st_fifo.len = 0;

	/* Tell the attached dongle about the initial speed as well. */
	via_ircc_change_dongle_speed(iobase, self->io.speed,
				     self->io.dongle_id);

	WriteReg(iobase, I_ST_CT_0, 0x80);	/* enable IR */
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_read_dongle_id (void)
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
/*
 * Function via_ircc_read_dongle_id (iobase)
 *
 *    Hardware probing of the dongle type is not implemented; warn the
 *    user to pass the dongle_id module parameter and fall back to
 *    id 9 (IBM type).
 */
static int via_ircc_read_dongle_id(int iobase)
{
	IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
	return 9;	/* default to IBM */
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
|
|
|
|
* Change speed of the attach dongle
|
|
|
|
* only implement two type of dongle currently.
|
|
|
|
*/
|
|
|
|
/*
 * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
 *
 *    Change speed of the attached dongle.  Only a few dongle types are
 *    implemented.  The mode actually programmed is derived from the
 *    controller state (IsSIROn()/IsMIROn()/IsFIROn()), not from the
 *    'speed' argument.
 */
static void via_ircc_change_dongle_speed(int iobase, int speed,
					 int dongle_id)
{
	u8 mode = 0;

	/* speed is unused, as we use IsSIROn()/IsMIROn() */
	speed = speed;

	IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
		   __func__, speed, iobase, dongle_id);

	switch (dongle_id) {

		/* Note: The dongle_id's listed here are derived from
		 * nsc-ircc.c */

	case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
		UseOneRX(iobase, ON);	/* use one RX pin RX1,RX2 */
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);

		EnRX2(iobase, ON);	/* sir to rx2 */
		EnGPIOtoRX2(iobase, OFF);

		if (IsSIROn(iobase)) {	/* sir */
			/* Mode select Off */
			SlowIRRXLowActive(iobase, ON);
			udelay(1000);
			SlowIRRXLowActive(iobase, OFF);
		} else {
			if (IsMIROn(iobase)) {	/* mir */
				/* Mode select On */
				SlowIRRXLowActive(iobase, OFF);
				udelay(20);
			} else {	/* fir */
				if (IsFIROn(iobase)) {	/* fir */
					/* Mode select On */
					SlowIRRXLowActive(iobase, OFF);
					udelay(20);
				}
			}
		}
		break;

	case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
		UseOneRX(iobase, ON);	/* use ONE RX....RX1 */
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);	/* invert RX pin */

		EnRX2(iobase, ON);
		EnGPIOtoRX2(iobase, OFF);
		if (IsSIROn(iobase)) {	/* sir */
			/* Mode select On */
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			/* Mode select Off */
			SlowIRRXLowActive(iobase, OFF);
		}
		if (IsMIROn(iobase)) {	/* mir */
			/* Mode select On */
			SlowIRRXLowActive(iobase, OFF);
			udelay(20);
			/* Mode select Off */
			SlowIRRXLowActive(iobase, ON);
		} else {	/* fir */
			if (IsFIROn(iobase)) {	/* fir */
				/* Mode select On */
				SlowIRRXLowActive(iobase, OFF);
				/* TX On */
				WriteTX(iobase, ON);
				udelay(20);
				/* Mode select OFF */
				SlowIRRXLowActive(iobase, ON);
				udelay(20);
				/* TX Off */
				WriteTX(iobase, OFF);
			}
		}
		break;

	case 0x0d:
		UseOneRX(iobase, OFF);	/* use two RX pin RX1,RX2 */
		InvertTX(iobase, OFF);
		InvertRX(iobase, OFF);
		SlowIRRXLowActive(iobase, OFF);
		if (IsSIROn(iobase)) {	/* sir */
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	/* sir to rx2 */
		} else {	/* fir mir */
			EnGPIOtoRX2(iobase, OFF);
			WriteGIO(iobase, OFF);
			EnRX2(iobase, OFF);	/* fir to rx */
		}
		break;

	case 0x11:	/* Temic TFDS4500 */

		IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);

		UseOneRX(iobase, ON);	/* use ONE RX....RX1 */
		InvertTX(iobase, OFF);
		InvertRX(iobase, ON);	/* invert RX pin */

		EnRX2(iobase, ON);	/* sir to rx2 */
		EnGPIOtoRX2(iobase, OFF);

		if( IsSIROn(iobase) ){	/* sir */

			/* Mode select On */
			SlowIRRXLowActive(iobase, ON);
			udelay(20);
			/* Mode select Off */
			SlowIRRXLowActive(iobase, OFF);

		} else{
			IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
		}
		break;

	case 0x0ff: /* Vishay */
		if (IsSIROn(iobase))
			mode = 0;
		else if (IsMIROn(iobase))
			mode = 1;
		else if (IsFIROn(iobase))
			mode = 2;
		else if (IsVFIROn(iobase))
			mode = 5;	/* VFIR-16 */
		SI_SetMode(iobase, mode);
		break;

	default:
		IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
			   __func__, dongle_id);
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_change_speed (self, baud)
|
|
|
|
*
|
|
|
|
* Change the speed of the device
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
{
	struct net_device *dev = self->netdev;
	u16 iobase;
	u8 value = 0, bTmp;

	iobase = self->io.fir_base;
	/* Update accounting for new speed */
	self->io.speed = speed;
	IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);

	/* Disable IR while we reprogram the controller. */
	WriteReg(iobase, I_ST_CT_0, 0x0);

	/* Controller mode selection: pick SIR/MIR/FIR/VFIR and the
	 * baud-rate divisor ('value') for the chosen line speed. */
	switch (speed) {
	case 2400:
	case 9600:
	case 19200:
	case 38400:
	case 57600:
	case 115200:
		/* SIR divisor: 115200 is the base rate, so e.g. 9600
		 * yields (115200/9600)-1 = 11. */
		value = (115200/speed)-1;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 576000:
		/* FIXME: this can't be right, as it's the same as 115200,
		 * and 576000 is MIR, not SIR. */
		value = 0;
		SetSIR(iobase, ON);
		CRC16(iobase, ON);
		break;
	case 1152000:
		value = 0;
		SetMIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	case 4000000:
		value = 0;
		SetFIR(iobase, ON);
		SetPulseWidth(iobase, 0);
		SetSendPreambleCount(iobase, 14);
		CRC16(iobase, OFF);
		EnTXCRC(iobase, ON);
		break;
	case 16000000:
		value = 0;
		SetVFIR(iobase, ON);
		/* FIXME: CRC ??? */
		break;
	default:
		value = 0;
		break;
	}

	/* Set baudrate divisor into bits [2..7] of register 0x19,
	 * preserving the low two bits. */
	bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
	bTmp |= value << 2;
	WriteReg(iobase, I_CF_H_1, bTmp);

	/* Some dongles may need to be informed about speed changes. */
	via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);

	/* Set FIFO size to 64 */
	SetFIFO(iobase, 64);

	/* Enable IR */
	WriteReg(iobase, I_ST_CT_0, 0x80);

	// EnTXFIFOHalfLevelInt(iobase,ON);

	/* Enable some interrupts so we can receive frames */
	//EnAllInt(iobase,ON);

	/* SIR-only receive filtering: on in SIR mode, off otherwise. */
	if (IsSIROn(iobase)) {
		SIRFilter(iobase, ON);
		SIRRecvAny(iobase, ON);
	} else {
		SIRFilter(iobase, OFF);
		SIRRecvAny(iobase, OFF);
	}

	/* Swap the net_device ops so hard_xmit matches the new mode:
	 * DMA-based FIR path above 115200 bps, async SIR path below. */
	if (speed > 115200) {
		/* Install FIR xmit handler */
		dev->netdev_ops = &via_ircc_fir_ops;
		via_ircc_dma_receive(self);
	} else {
		/* Install SIR xmit handler */
		dev->netdev_ops = &via_ircc_sir_ops;
	}
	netif_wake_queue(dev);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_hard_xmit (skb, dev)
|
|
|
|
*
|
|
|
|
* Transmit the frame!
|
|
|
|
*
|
|
|
|
*/
|
2009-08-31 19:50:50 +00:00
|
|
|
static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	unsigned long flags;
	u16 iobase;
	__u32 speed;

	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
	iobase = self->io.fir_base;

	netif_stop_queue(dev);
	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame: a zero-length skb is a pure
		 * speed-change request, so switch now and we're done. */
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			/* Real data pending: send it at the current speed,
			 * switch after TX completes (see xmit_complete). */
			self->new_speed = speed;
	}
	/* Re-initialize the controller into SIR TX mode. */
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);

	spin_lock_irqsave(&self->lock, flags);
	/* Async-wrap the frame (BOF/EOF framing, byte stuffing) into the
	 * driver's TX DMA buffer. */
	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len =
	    async_wrap_skb(skb, self->tx_buff.data,
			   self->tx_buff.truesize);

	dev->stats.tx_bytes += self->tx_buff.len;
	/* Send this frame with old speed */
	SetBaudRate(iobase, self->io.speed);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);

	/* Kick off the DMA transfer of the wrapped frame. */
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_TX_MODE);

	SetSendByte(iobase, self->tx_buff.len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&self->lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
|
|
|
|
|
2009-08-31 19:50:50 +00:00
|
|
|
static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
						struct net_device *dev)
{
	struct via_ircc_cb *self;
	u16 iobase;
	__u32 speed;
	unsigned long flags;

	self = netdev_priv(dev);
	iobase = self->io.fir_base;

	/* A previous frame is still queued in the TX FIFO.
	 * NOTE(review): returning NETDEV_TX_OK without freeing skb looks
	 * like an skb leak (OK means the driver consumed the buffer) —
	 * confirm against netdev ndo_start_xmit contract. */
	if (self->st_fifo.len)
		return NETDEV_TX_OK;
	/* NOTE(review): 1500us busy-wait in the xmit path; udelay for
	 * >1ms is discouraged (mdelay/usleep_range preferred) — verify. */
	if (self->chip_id == 0x3076)
		iodelay(1500);
	else
		udelay(1500);
	netif_stop_queue(dev);
	/* Zero-length skb carrying a speed request: change speed now;
	 * otherwise remember it for after this frame completes. */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		if (!skb->len) {
			via_ircc_change_speed(self, speed);
			dev->trans_start = jiffies;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}
	spin_lock_irqsave(&self->lock, flags);
	/* Copy the frame into the next free slot of the TX window buffer;
	 * 'tail' is the bump pointer inside the shared DMA buffer. */
	self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
	self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;

	self->tx_fifo.tail += skb->len;
	dev->stats.tx_bytes += skb->len;
	skb_copy_from_linear_data(skb,
		self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
	self->tx_fifo.len++;
	self->tx_fifo.free++;
//F01   if (self->tx_fifo.len == 1) {
	via_ircc_dma_xmit(self, iobase);
//F01   }
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
	dev->trans_start = jiffies;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&self->lock, flags);
	return NETDEV_TX_OK;

}
|
|
|
|
|
|
|
|
/*
 * Program the controller and the ISA DMA channel to transmit the frame
 * currently at tx_fifo.queue[tx_fifo.ptr]. Caller holds self->lock.
 * Completion is signalled via the TX interrupt (via_ircc_dma_xmit_complete).
 */
static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
{
	/* Quiesce DMA while we reconfigure for transmit. */
	EnTXDMA(iobase, OFF);
	self->io.direction = IO_XMIT;
	EnPhys(iobase, ON);
	EnableTX(iobase, ON);
	EnableRX(iobase, OFF);
	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);
	EnAllInt(iobase, ON);
	EnTXDMA(iobase, ON);
	EnRXDMA(iobase, OFF);
	/* The DMA address is the bus address of the frame's offset inside
	 * the shared tx_buff (queue 'start' is a CPU pointer into it). */
	irda_setup_dma(self->io.dma,
		       ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
			self->tx_buff.head) + self->tx_buff_dma,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
	IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
		   __func__, self->tx_fifo.ptr,
		   self->tx_fifo.queue[self->tx_fifo.ptr].len,
		   self->tx_fifo.len);

	SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
	RXStart(iobase, OFF);
	TXStart(iobase, ON);
	return 0;

}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_dma_xmit_complete (self)
|
|
|
|
*
|
|
|
|
* The transfer of a frame in finished. This function will only be called
|
|
|
|
* by the interrupt handler
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
{
	int iobase;
	int ret = TRUE;
	u8 Tx_status;

	IRDA_DEBUG(3, "%s()\n", __func__);

	iobase = self->io.fir_base;
	/* Disable DMA */
//	DisableDmaChannel(self->io.dma);
	/* Check for underrun! */
	/* Clear bit, by writing 1 into it */
	Tx_status = GetTXStatus(iobase);
	if (Tx_status & 0x08) {
		/* TX FIFO underrun: count the error and reset the chip. */
		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;
		hwreset(self);
		/* how to clear underrun? */
	} else {
		self->netdev->stats.tx_packets++;
		ResetChip(iobase, 3);
		ResetChip(iobase, 4);
	}
	/* Check if we need to change the speed (a speed change was
	 * deferred until this frame finished transmitting). */
	if (self->new_speed) {
		via_ircc_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Finished with this frame, so prepare for next */
	if (IsFIROn(iobase)) {
		if (self->tx_fifo.len) {
			self->tx_fifo.len--;
			self->tx_fifo.ptr++;
		}
	}
	IRDA_DEBUG(1,
		   "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
		   __func__,
		   self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
/* F01_S
	// Any frames to be sent back-to-back?
	if (self->tx_fifo.len) {
		// Not finished yet!
		via_ircc_dma_xmit(self, iobase);
		ret = FALSE;
	} else {
F01_E*/
	// Reset Tx FIFO info (back-to-back TX is disabled, see F01 above,
	// so the window is always drained completely here).
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
//F01   }

	// Make sure we have room for more frames
//F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
	// Not busy transmitting anymore
	// Tell the network layer, that we can accept more frames
	netif_wake_queue(self->netdev);
//F01   }
	return ret;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_dma_receive (self)
|
|
|
|
*
|
|
|
|
* Set configuration for receive a frame.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int via_ircc_dma_receive(struct via_ircc_cb *self)
{
	int iobase;

	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	/* Reset both the TX window and the RX status FIFO bookkeeping
	 * before arming a fresh receive. */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;
	self->RxDataReady = 0;
	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;
	self->st_fifo.len = self->st_fifo.pending_bytes = 0;
	self->st_fifo.tail = self->st_fifo.head = 0;

	EnPhys(iobase, ON);
	EnableTX(iobase, OFF);
	EnableRX(iobase, ON);

	ResetChip(iobase, 0);
	ResetChip(iobase, 1);
	ResetChip(iobase, 2);
	ResetChip(iobase, 3);
	ResetChip(iobase, 4);

	EnAllInt(iobase, ON);
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, ON);
	/* RX uses the second DMA channel and the whole rx_buff as the
	 * DMA target; frames are carved out of it on completion. */
	irda_setup_dma(self->io.dma2, self->rx_buff_dma,
		       self->rx_buff.truesize, DMA_RX_MODE);
	TXStart(iobase, OFF);
	RXStart(iobase, ON);

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_dma_receive_complete (self)
|
|
|
|
*
|
|
|
|
* Controller Finished with receiving frames,
|
|
|
|
* and this routine is call by ISR
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
					 int iobase)
{
	struct st_fifo *st_fifo;
	struct sk_buff *skb;
	int len, i;
	u8 status = 0;

	iobase = self->io.fir_base;
	st_fifo = &self->st_fifo;

	if (self->io.speed < 4000000) {	//Speed below FIR
		/* SIR/MIR path: hand the whole frame straight to the stack.
		 * NOTE(review): len - 2 strips what is presumably the CRC;
		 * len < 2 would underflow skb_put — confirm GetRecvByte's
		 * minimum return value. */
		len = GetRecvByte(iobase, self);
		skb = dev_alloc_skb(len + 1);
		if (skb == NULL)
			return FALSE;
		// Make sure IP header gets aligned
		skb_reserve(skb, 1);
		skb_put(skb, len - 2);
		if (self->chip_id == 0x3076) {
			/* 3076 stores RX bytes at every other position. */
			for (i = 0; i < len - 2; i++)
				skb->data[i] = self->rx_buff.data[i * 2];
		} else {
			if (self->chip_id == 0x3096) {
				for (i = 0; i < len - 2; i++)
					skb->data[i] =
					    self->rx_buff.data[i];
			}
		}
		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
		return TRUE;
	}

	else {			//FIR mode
		len = GetRecvByte(iobase, self);
		if (len == 0)
			return TRUE;	//interrupt only, data maybe move by RxT
		/* Sanity-check the payload size (len includes 4 bytes of
		 * trailer); anything out of range forces a chip reset. */
		if (((len - 4) < 2) || ((len - 4) > 2048)) {
			IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
				   __func__, len, RxCurCount(iobase, self),
				   self->RxLastCount);
			hwreset(self);
			return FALSE;
		}
		IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
			   __func__,
			   st_fifo->len, len - 4, RxCurCount(iobase, self));

		/* Record this frame in the status FIFO ring. */
		st_fifo->entries[st_fifo->tail].status = status;
		st_fifo->entries[st_fifo->tail].len = len;
		st_fifo->pending_bytes += len;
		st_fifo->tail++;
		st_fifo->len++;
		if (st_fifo->tail > MAX_RX_WINDOW)
			st_fifo->tail = 0;
		self->RxDataReady = 0;

		// It maybe have MAX_RX_WINDOW package receive by
		// receive_complete before Timer IRQ
/* F01_S
	  if (st_fifo->len < (MAX_RX_WINDOW+2 )) {
		  RXStart(iobase,ON);
	  	  SetTimer(iobase,4);
	  }
	  else	  {
F01_E */
		/* Stop receiving while we drain one entry to the stack. */
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
//F01_S
		// Put this entry back in fifo
		if (st_fifo->head > MAX_RX_WINDOW)
			st_fifo->head = 0;
		status = st_fifo->entries[st_fifo->head].status;
		len = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		skb = dev_alloc_skb(len + 1 - 4);
		/*
		 * if frame size, data ptr, or skb ptr are wrong, then get next
		 * entry.
		 */
		if ((skb == NULL) || (skb->data == NULL) ||
		    (self->rx_buff.data == NULL) || (len < 6)) {
			self->netdev->stats.rx_dropped++;
			/* kfree_skb(NULL) is a no-op, so this is safe in
			 * every drop case. */
			kfree_skb(skb);
			return TRUE;
		}
		skb_reserve(skb, 1);
		skb_put(skb, len - 4);

		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
		IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
			   len - 4, self->rx_buff.data);

		// Move to next frame
		self->rx_buff.data += len;
		self->netdev->stats.rx_bytes += len;
		self->netdev->stats.rx_packets++;
		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

//F01_E
	}			//FIR
	return TRUE;

}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if frame is received , but no INT ,then use this routine to upload frame.
|
|
|
|
*/
|
|
|
|
static int upload_rxdata(struct via_ircc_cb *self, int iobase)
{
	struct sk_buff *skb;
	int len;
	struct st_fifo *st_fifo;
	st_fifo = &self->st_fifo;

	len = GetRecvByte(iobase, self);

	IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);

	/* Reject undersized frames (len includes a 4-byte trailer). */
	if ((len - 4) < 2) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}

	skb = dev_alloc_skb(len + 1);
	if (skb == NULL) {
		self->netdev->stats.rx_dropped++;
		return FALSE;
	}
	skb_reserve(skb, 1);
	/* NOTE(review): the other RX paths copy len - 4 bytes; the extra
	 * +1 here looks inconsistent — confirm against hardware docs. */
	skb_put(skb, len - 4 + 1);
	skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
	st_fifo->tail++;
	st_fifo->len++;
	if (st_fifo->tail > MAX_RX_WINDOW)
		st_fifo->tail = 0;
	// Move to next frame
	self->rx_buff.data += len;
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;
	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);
	/* Keep receiving while the status FIFO has room, otherwise stop
	 * RX until the pending entries are drained. */
	if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
		RXStart(iobase, ON);
	} else {
		EnableRX(iobase, OFF);
		EnRXDMA(iobase, OFF);
		RXStart(iobase, OFF);
	}
	return TRUE;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Implement back to back receive , use this routine to upload data.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
|
|
|
|
{
|
|
|
|
struct st_fifo *st_fifo;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
int len;
|
|
|
|
u8 status;
|
|
|
|
|
|
|
|
st_fifo = &self->st_fifo;
|
|
|
|
|
|
|
|
if (CkRxRecv(iobase, self)) {
|
|
|
|
// if still receiving ,then return ,don't upload frame
|
|
|
|
self->RetryCount = 0;
|
|
|
|
SetTimer(iobase, 20);
|
|
|
|
self->RxDataReady++;
|
|
|
|
return FALSE;
|
|
|
|
} else
|
|
|
|
self->RetryCount++;
|
|
|
|
|
|
|
|
if ((self->RetryCount >= 1) ||
|
2009-12-03 07:58:21 +00:00
|
|
|
((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
|
|
|
|
(st_fifo->len >= (MAX_RX_WINDOW))) {
|
2005-04-16 22:20:36 +00:00
|
|
|
while (st_fifo->len > 0) { //upload frame
|
|
|
|
// Put this entry back in fifo
|
|
|
|
if (st_fifo->head > MAX_RX_WINDOW)
|
|
|
|
st_fifo->head = 0;
|
|
|
|
status = st_fifo->entries[st_fifo->head].status;
|
|
|
|
len = st_fifo->entries[st_fifo->head].len;
|
|
|
|
st_fifo->head++;
|
|
|
|
st_fifo->len--;
|
|
|
|
|
|
|
|
skb = dev_alloc_skb(len + 1 - 4);
|
|
|
|
/*
|
|
|
|
* if frame size, data ptr, or skb ptr are wrong,
|
|
|
|
* then get next entry.
|
|
|
|
*/
|
2009-12-03 07:58:21 +00:00
|
|
|
if ((skb == NULL) || (skb->data == NULL) ||
|
|
|
|
(self->rx_buff.data == NULL) || (len < 6)) {
|
2009-01-06 18:40:43 +00:00
|
|
|
self->netdev->stats.rx_dropped++;
|
2005-04-16 22:20:36 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
skb_reserve(skb, 1);
|
|
|
|
skb_put(skb, len - 4);
|
2007-03-31 14:55:19 +00:00
|
|
|
skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-31 00:20:18 +00:00
|
|
|
IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
|
2005-04-16 22:20:36 +00:00
|
|
|
len - 4, st_fifo->head);
|
|
|
|
|
|
|
|
// Move to next frame
|
|
|
|
self->rx_buff.data += len;
|
2009-01-06 18:40:43 +00:00
|
|
|
self->netdev->stats.rx_bytes += len;
|
|
|
|
self->netdev->stats.rx_packets++;
|
2005-04-16 22:20:36 +00:00
|
|
|
skb->dev = self->netdev;
|
2007-03-19 22:30:44 +00:00
|
|
|
skb_reset_mac_header(skb);
|
2005-04-16 22:20:36 +00:00
|
|
|
skb->protocol = htons(ETH_P_IRDA);
|
|
|
|
netif_rx(skb);
|
|
|
|
} //while
|
|
|
|
self->RetryCount = 0;
|
|
|
|
|
|
|
|
IRDA_DEBUG(2,
|
|
|
|
"%s(): End of upload HostStatus=%x,RxStatus=%x\n",
|
2008-07-31 00:20:18 +00:00
|
|
|
__func__,
|
2005-04-16 22:20:36 +00:00
|
|
|
GetHostStatus(iobase), GetRXStatus(iobase));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if frame is receive complete at this routine ,then upload
|
|
|
|
* frame.
|
|
|
|
*/
|
2009-12-03 07:58:21 +00:00
|
|
|
if ((GetRXStatus(iobase) & 0x10) &&
|
|
|
|
(RxCurCount(iobase, self) != self->RxLastCount)) {
|
2005-04-16 22:20:36 +00:00
|
|
|
upload_rxdata(self, iobase);
|
|
|
|
if (irda_device_txqueue_empty(self->netdev))
|
|
|
|
via_ircc_dma_receive(self);
|
|
|
|
}
|
|
|
|
} // timer detect complete
|
|
|
|
else
|
|
|
|
SetTimer(iobase, 4);
|
|
|
|
return TRUE;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 13:55:46 +00:00
|
|
|
* Function via_ircc_interrupt (irq, dev_id)
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
|
|
|
* An interrupt from the chip has arrived. Time to do some work
|
|
|
|
*
|
|
|
|
*/
|
2007-10-29 09:46:16 +00:00
|
|
|
static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct via_ircc_cb *self = netdev_priv(dev);
	int iobase;
	u8 iHostIntType, iRxIntType, iTxIntType;

	iobase = self->io.fir_base;
	spin_lock(&self->lock);
	/* Host status byte: bit6 = timer, bit5 = TX, bit4 = RX event. */
	iHostIntType = GetHostStatus(iobase);

	IRDA_DEBUG(4, "%s(): iHostIntType %02x: %s %s %s %02x\n",
		   __func__, iHostIntType,
		   (iHostIntType & 0x40) ? "Timer" : "",
		   (iHostIntType & 0x20) ? "Tx" : "",
		   (iHostIntType & 0x10) ? "Rx" : "",
		   (iHostIntType & 0x0e) >> 1);
	if ((iHostIntType & 0x40) != 0) {	//Timer Event
		self->EventFlag.TimeOut++;
		ClearTimerInt(iobase, 1);
		if (self->io.direction == IO_XMIT) {
			via_ircc_dma_xmit(self, iobase);
		}
		if (self->io.direction == IO_RECV) {
			/*
			 * frame ready hold too long, must reset.
			 */
			if (self->RxDataReady > 30) {
				hwreset(self);
				if (irda_device_txqueue_empty(self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {	// call this to upload frame.
				RxTimerHandler(self, iobase);
			}
		}		//RECV
	}			//Timer Event
	if ((iHostIntType & 0x20) != 0) {	//Tx Event
		iTxIntType = GetTXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iTxIntType %02x: %s %s %s %s\n",
			   __func__, iTxIntType,
			   (iTxIntType & 0x08) ? "FIFO underr." : "",
			   (iTxIntType & 0x04) ? "EOM" : "",
			   (iTxIntType & 0x02) ? "FIFO ready" : "",
			   (iTxIntType & 0x01) ? "Early EOM" : "");

		/* End-of-message: finish the frame and, if the queue is
		 * drained, switch the controller back to receive. */
		if (iTxIntType & 0x4) {
			self->EventFlag.EOMessage++;	// read and will auto clean
			if (via_ircc_dma_xmit_complete(self)) {
				if (irda_device_txqueue_empty
				    (self->netdev)) {
					via_ircc_dma_receive(self);
				}
			} else {
				self->EventFlag.Unknown++;
			}
		}		//EOP
	}			//Tx Event
	//----------------------------------------
	if ((iHostIntType & 0x10) != 0) {	//Rx Event
		/* Check if DMA has finished */
		iRxIntType = GetRXStatus(iobase);

		IRDA_DEBUG(4, "%s(): iRxIntType %02x: %s %s %s %s %s %s %s\n",
			   __func__, iRxIntType,
			   (iRxIntType & 0x80) ? "PHY err." : "",
			   (iRxIntType & 0x40) ? "CRC err" : "",
			   (iRxIntType & 0x20) ? "FIFO overr." : "",
			   (iRxIntType & 0x10) ? "EOF" : "",
			   (iRxIntType & 0x08) ? "RxData" : "",
			   (iRxIntType & 0x02) ? "RxMaxLen" : "",
			   (iRxIntType & 0x01) ? "SIR bad" : "");
		if (!iRxIntType)
			IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);

		if (iRxIntType & 0x10) {
			/* EOF: a complete frame is in the DMA buffer. */
			if (via_ircc_dma_receive_complete(self, iobase)) {
//F01       if(!(IsFIROn(iobase))) via_ircc_dma_receive(self);
				via_ircc_dma_receive(self);
			}
		}		// No ERR
		else {		//ERR
			IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
				   __func__, iRxIntType, iHostIntType,
				   RxCurCount(iobase, self),
				   self->RxLastCount);

			if (iRxIntType & 0x20) {	//FIFO OverRun ERR
				ResetChip(iobase, 0);
				ResetChip(iobase, 1);
			} else {	//PHY,CRC ERR

				if (iRxIntType != 0x08)
					hwreset(self);	//F01
			}
			/* Re-arm receive after any error. */
			via_ircc_dma_receive(self);
		}		//ERR

	}			//Rx Event
	spin_unlock(&self->lock);
	/* Non-zero host status => IRQ_HANDLED. */
	return IRQ_RETVAL(iHostIntType);
}
|
|
|
|
|
|
|
|
/*
 * Full controller reset used on TX underrun / RX corruption: stop all
 * DMA and TX/RX engines, re-run the init sequence in SIR mode, then
 * restore the speed that was in effect before the reset.
 */
static void hwreset(struct via_ircc_cb *self)
{
	int iobase;
	iobase = self->io.fir_base;

	IRDA_DEBUG(3, "%s()\n", __func__);

	ResetChip(iobase, 5);
	EnableDMA(iobase, OFF);
	EnableTX(iobase, OFF);
	EnableRX(iobase, OFF);
	EnRXDMA(iobase, OFF);
	EnTXDMA(iobase, OFF);
	RXStart(iobase, OFF);
	TXStart(iobase, OFF);
	InitCard(iobase);
	CommonInit(iobase);
	SIRFilter(iobase, ON);
	SetSIR(iobase, ON);
	CRC16(iobase, ON);
	EnTXCRC(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x00);
	SetBaudRate(iobase, 9600);
	SetPulseWidth(iobase, 12);
	SetSendPreambleCount(iobase, 0);
	WriteReg(iobase, I_ST_CT_0, 0x80);

	/* Restore speed. */
	via_ircc_change_speed(self, self->io.speed);

	self->st_fifo.len = 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_is_receiving (self)
|
|
|
|
*
|
|
|
|
* Return TRUE is we are currently receiving a frame
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int via_ircc_is_receiving(struct via_ircc_cb *self)
|
|
|
|
{
|
|
|
|
int status = FALSE;
|
|
|
|
int iobase;
|
|
|
|
|
|
|
|
IRDA_ASSERT(self != NULL, return FALSE;);
|
|
|
|
|
|
|
|
iobase = self->io.fir_base;
|
|
|
|
if (CkRxRecv(iobase, self))
|
|
|
|
status = TRUE;
|
|
|
|
|
2008-07-31 00:20:18 +00:00
|
|
|
IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_net_open (dev)
|
|
|
|
*
|
|
|
|
* Start the device
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int via_ircc_net_open(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;
	char hwname[32];

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	dev->stats.rx_packets = 0;
	IRDA_ASSERT(self != NULL, return 0;);
	iobase = self->io.fir_base;
	/* Acquire IRQ first; each later failure unwinds everything
	 * acquired before it. */
	if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
		IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
			     self->io.irq);
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ, and clean up on
	 * failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
			     self->io.dma);
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}
	/* Second DMA channel (RX) only if distinct from the TX channel. */
	if (self->io.dma2 != self->io.dma) {
		if (request_dma(self->io.dma2, dev->name)) {
			IRDA_WARNING("%s, unable to allocate dma2=%d\n",
				     driver_name, self->io.dma2);
			free_irq(self->io.irq, dev);
			free_dma(self->io.dma);
			return -EAGAIN;
		}
	}

	/* turn on interrupts */
	EnAllInt(iobase, ON);
	EnInternalLoop(iobase, OFF);
	EnExternalLoop(iobase, OFF);

	/* Arm the receiver. */
	via_ircc_dma_receive(self);

	/* Ready to play! */
	netif_start_queue(dev);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	sprintf(hwname, "VIA @ 0x%x", iobase);
	self->irlap = irlap_open(dev, &self->qos, hwname);

	self->RxLastCount = 0;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Function via_ircc_net_close (dev)
 *
 *    Stop the device.  Tears down, in reverse order of net_open: stops the
 *    TX queue, closes the IrLAP instance, disables the DMA engines and
 *    interrupts on the chip, then releases the IRQ and DMA channel(s).
 *
 *    Returns 0 on success (-1 only via the IRDA_ASSERT guard on a NULL dev).
 */
static int via_ircc_net_close(struct net_device *dev)
{
	struct via_ircc_cb *self;
	int iobase;

	IRDA_DEBUG(3, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);
	IRDA_ASSERT(self != NULL, return 0;);

	/* Stop device */
	netif_stop_queue(dev);
	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;
	iobase = self->io.fir_base;
	/* Quiesce the chip's TX/RX DMA engines before releasing the
	 * system DMA channel (names suggest ON/OFF enables — hardware
	 * semantics per VIA datasheet). */
	EnTXDMA(iobase, OFF);
	EnRXDMA(iobase, OFF);
	DisableDmaChannel(self->io.dma);

	/* Disable interrupts */
	EnAllInt(iobase, OFF);
	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);
	/* A second channel was only requested in net_open when it is
	 * distinct from the first; mirror that condition when freeing. */
	if (self->io.dma2 != self->io.dma)
		free_dma(self->io.dma2);

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Function via_ircc_net_ioctl (dev, rq, cmd)
|
|
|
|
*
|
|
|
|
* Process IOCTL commands for this device
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
|
|
|
|
int cmd)
|
|
|
|
{
|
|
|
|
struct if_irda_req *irq = (struct if_irda_req *) rq;
|
|
|
|
struct via_ircc_cb *self;
|
|
|
|
unsigned long flags;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
IRDA_ASSERT(dev != NULL, return -1;);
|
2008-11-13 07:38:14 +00:00
|
|
|
self = netdev_priv(dev);
|
2005-04-16 22:20:36 +00:00
|
|
|
IRDA_ASSERT(self != NULL, return -1;);
|
2008-07-31 00:20:18 +00:00
|
|
|
IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
|
2005-04-16 22:20:36 +00:00
|
|
|
cmd);
|
|
|
|
/* Disable interrupts & save flags */
|
|
|
|
spin_lock_irqsave(&self->lock, flags);
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCSBANDWIDTH: /* Set bandwidth */
|
|
|
|
if (!capable(CAP_NET_ADMIN)) {
|
|
|
|
ret = -EPERM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
via_ircc_change_speed(self, irq->ifr_baudrate);
|
|
|
|
break;
|
|
|
|
case SIOCSMEDIABUSY: /* Set media busy */
|
|
|
|
if (!capable(CAP_NET_ADMIN)) {
|
|
|
|
ret = -EPERM;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
irda_device_set_media_busy(self->netdev, TRUE);
|
|
|
|
break;
|
|
|
|
case SIOCGRECEIVING: /* Check if we are receiving right now */
|
|
|
|
irq->ifr_receiving = via_ircc_is_receiving(self);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
spin_unlock_irqrestore(&self->lock, flags);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Module metadata exposed via modinfo */
MODULE_AUTHOR("VIA Technologies,inc");
MODULE_DESCRIPTION("VIA IrDA Device Driver");
MODULE_LICENSE("GPL");

/* Register the driver's init/cleanup routines as module load/unload hooks */
module_init(via_ircc_init);
module_exit(via_ircc_cleanup);
|