Mirror of https://github.com/torvalds/linux.git (synced 2024-11-26 22:21:42 +00:00)

commit 68984aedab
Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Documentation/arm/Samsung-S3C24XX/USB-Host.txt | 93 lines (new file)
@@ -0,0 +1,93 @@
S3C24XX USB Host support
========================

Introduction
------------

This document details the S3C2410/S3C2440 in-built OHCI USB host support.

Configuration
-------------

Enable at least the following kernel options:

menuconfig:

  Device Drivers --->
    USB support --->
      <*> Support for Host-side USB
      <*>   OHCI HCD support

.config:
  CONFIG_USB
  CONFIG_USB_OHCI_HCD

Once these options are configured, the standard set of USB device
drivers can be configured and used.
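For reference, a built-in (non-modular) build would carry lines like the
following in .config; the "=y" values below are an assumption, and a modular
build would use "=m" for the HCD instead:

  CONFIG_USB=y
  CONFIG_USB_OHCI_HCD=y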
Board Support
-------------

The driver attaches to a platform device, which will need to be
added by the board specific support file in linux/arch/arm/mach-s3c2410,
such as mach-bast.c or mach-smdk2410.c.

The platform device's platform_data field is only needed if the
board implements extra power control or over-current monitoring.

The OHCI driver does not ensure the state of the S3C2410's MISCCTRL
register, so if both ports are to be used for the host, then it is
the board support file's responsibility to ensure that the second
port is configured to be connected to the OHCI core.
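As an illustrative sketch only (the device variable and function names below
are assumptions, not taken from this patch; see
linux/arch/arm/mach-s3c2410/usb-simtec.c for the real Simtec implementation),
a board file might hook its platform data up roughly like this:

	/* hypothetical board file fragment: attach OHCI platform data */
	#include <asm/arch/usb-control.h>

	extern struct platform_device s3c_device_usb;	/* assumed device name */

	static struct s3c2410_hcd_info myboard_usb_info;	/* filled in elsewhere */

	static int __init myboard_usb_init(void)
	{
		/* make the platform data visible to the OHCI driver */
		s3c_device_usb.dev.platform_data = &myboard_usb_info;
		return 0;
	}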
Platform Data
-------------

See linux/include/asm-arm/arch-s3c2410/usb-control.h for the
descriptions of the platform device data. An implementation
can be found in linux/arch/arm/mach-s3c2410/usb-simtec.c.

The struct s3c2410_hcd_info contains a pair of functions
that get called to enable over-current detection, and to
control the port power status.

The ports are numbered 0 and 1.

power_control:

  Called to enable or disable the power on the port.

enable_oc:

  Called to enable or disable the over-current monitoring.
  This should claim or release the resources being used to
  check the power condition on the port, such as an IRQ.

report_oc:

  The OHCI driver fills this field in for the over-current code
  to call when there is a change to the over-current state on
  a port. The ports argument is a bitmask of 1 bit per port,
  with bit X being 1 for an over-current on port X.

  The function s3c2410_usb_report_oc() has been provided to
  ensure this is called correctly.

port[x]:

  This struct describes each port, 0 or 1. The platform driver
  should set the flags field of each port to S3C_HCDFLG_USED if
  the port is enabled.
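As a sketch of what a board might put in this structure (the callback bodies
and the "myboard" names are hypothetical; the field and flag names follow the
description above and usb-control.h):

	static void myboard_power_control(int port, int to)
	{
		/* switch the supply for 'port' on (to != 0) or off */
	}

	static void myboard_enable_oc(struct s3c2410_hcd_info *info, int on)
	{
		/* claim or release the over-current IRQ for this board */
	}

	static struct s3c2410_hcd_info myboard_usb_info = {
		.port[0]	= { .flags = S3C_HCDFLG_USED },
		.port[1]	= { .flags = S3C_HCDFLG_USED },
		.power_control	= myboard_power_control,
		.enable_oc	= myboard_enable_oc,
		/* report_oc is filled in by the OHCI driver itself */
	};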
Document Author
---------------

Ben Dooks, (c) 2005 Simtec Electronics
@@ -784,7 +784,7 @@ DVB SUBSYSTEM AND DRIVERS
P:	LinuxTV.org Project
M:	linux-dvb-maintainer@linuxtv.org
L:	linux-dvb@linuxtv.org (subscription required)
W:	http://linuxtv.org/developer/dvb.xml
W:	http://linuxtv.org/
S:	Supported

EATA-DMA SCSI DRIVER
@@ -533,6 +533,13 @@ ENTRY(__switch_to)
ldr r3, [r2, #TI_TP_VALUE]
stmia ip!, {r4 - sl, fp, sp, lr} @ Store most regs on stack
ldr r6, [r2, #TI_CPU_DOMAIN]!
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_MPCORE
clrex
#else
strex r3, r4, [ip] @ Clear exclusive monitor
#endif
#endif
#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)
mra r4, r5, acc0
stmia ip, {r4, r5}
@@ -1,4 +1,6 @@
#if __LINUX_ARM_ARCH__ >= 6
#include <linux/config.h>

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_MPCORE)
.macro bitop, instr
mov r2, #1
and r3, r0, #7 @ Get bit offset
@@ -1,6 +1,6 @@
/* linux/arch/arm/mach-s3c2410/usb-simtec.c
 *
 * Copyright (c) 2004 Simtec Electronics
 * Copyright (c) 2004,2005 Simtec Electronics
 *   Ben Dooks <ben@simtec.co.uk>
 *
 * http://www.simtec.co.uk/products/EB2410ITX/
@@ -14,6 +14,8 @@
 * Modifications:
 *	14-Sep-2004 BJD  Created
 *	18-Oct-2004 BJD  Cleanups, and added code to report OC cleared
 *	09-Aug-2005 BJD  Renamed s3c2410_report_oc to s3c2410_usb_report_oc
 *	09-Aug-2005 BJD  Ports powered only if both are enabled
 */

#define DEBUG
@@ -47,13 +49,19 @@
 * designed boards.
 */

static unsigned int power_state[2];

static void
usb_simtec_powercontrol(int port, int to)
{
pr_debug("usb_simtec_powercontrol(%d,%d)\n", port, to);

if (port == 1)
s3c2410_gpio_setpin(S3C2410_GPB4, to ? 0:1);
power_state[port] = to;

if (power_state[0] && power_state[1])
s3c2410_gpio_setpin(S3C2410_GPB4, 0);
else
s3c2410_gpio_setpin(S3C2410_GPB4, 1);
}

static irqreturn_t
@@ -63,10 +71,10 @@ usb_simtec_ocirq(int irq, void *pw, struct pt_regs *regs)

if (s3c2410_gpio_getpin(S3C2410_GPG10) == 0) {
pr_debug("usb_simtec: over-current irq (oc detected)\n");
s3c2410_report_oc(info, 3);
s3c2410_usb_report_oc(info, 3);
} else {
pr_debug("usb_simtec: over-current irq (oc cleared)\n");
s3c2410_report_oc(info, 0);
s3c2410_usb_report_oc(info, 0);
}

return IRQ_HANDLED;
@@ -383,6 +383,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
unsigned int user_pgprot;
int cpu_arch = cpu_architecture();
int i;

@@ -408,6 +409,9 @@ static void __init build_mem_type_table(void)
}
}

cp = &cache_policies[cachepolicy];
user_pgprot = cp->pte;

/*
 * ARMv6 and above have extended page tables.
 */
@@ -426,11 +430,18 @@ static void __init build_mem_type_table(void)
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

/*
 * Mark the device area as "shared device"
 */
mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
}

cp = &cache_policies[cachepolicy];
/*
 * User pages need to be mapped with the ASID
 * (iow, non-global)
 */
user_pgprot |= L_PTE_ASID;
}

if (cpu_arch >= CPU_ARCH_ARMv5) {
mem_types[MT_LOW_VECTORS].prot_pte |= cp->pte & PTE_CACHEABLE;
@@ -448,7 +459,7 @@ static void __init build_mem_type_table(void)

for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | cp->pte;
v &= (~(PTE_BUFFERABLE|PTE_CACHEABLE)) | user_pgprot;
protection_map[i] = __pgprot(v);
}
@@ -111,12 +111,6 @@ ENTRY(cpu_v6_switch_mm)
mcr p15, 0, r1, c13, c0, 1 @ set context ID
mov pc, lr

#define nG (1 << 11)
#define APX (1 << 9)
#define AP1 (1 << 5)
#define AP0 (1 << 4)
#define XN (1 << 0)

/*
 * cpu_v6_set_pte(ptep, pte)
 *
@@ -139,24 +133,24 @@ ENTRY(cpu_v6_switch_mm)
ENTRY(cpu_v6_set_pte)
str r1, [r0], #-2048 @ linux version

bic r2, r1, #0x00000ff0
bic r2, r1, #0x000007f0
bic r2, r2, #0x00000003
orr r2, r2, #AP0 | 2
orr r2, r2, #PTE_EXT_AP0 | 2

tst r1, #L_PTE_WRITE
tstne r1, #L_PTE_DIRTY
orreq r2, r2, #APX
orreq r2, r2, #PTE_EXT_APX

tst r1, #L_PTE_USER
orrne r2, r2, #AP1 | nG
tstne r2, #APX
bicne r2, r2, #APX | AP0
orrne r2, r2, #PTE_EXT_AP1
tstne r2, #PTE_EXT_APX
bicne r2, r2, #PTE_EXT_APX | PTE_EXT_AP0

tst r1, #L_PTE_YOUNG
biceq r2, r2, #APX | AP1 | AP0
biceq r2, r2, #PTE_EXT_APX | PTE_EXT_AP_MASK

@ tst r1, #L_PTE_EXEC
@ orreq r2, r2, #XN
@ orreq r2, r2, #PTE_EXT_XN

tst r1, #L_PTE_PRESENT
moveq r2, #0
@@ -1803,7 +1803,7 @@ static void __init fixup_device_tree(void)
if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
== PROM_ERROR)
return;
if (u3_rev != 0x35)
if (u3_rev != 0x35 && u3_rev != 0x37)
return;
/* does it need fixup ? */
if (prom_getproplen(i2c, "interrupts") > 0)
@@ -334,7 +334,7 @@ static void __cpuinit tsc_sync_wait(void)
{
if (notscsync || !cpu_has_tsc)
return;
sync_tsc(boot_cpu_id);
sync_tsc(0);
}

static __init int notscsync_setup(char *s)
@@ -261,7 +261,11 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
unsigned long long val;
unsigned long pfn;

/* Turn a kernel-virtual address into a physical page frame */
pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

/*
 * RED-PEN: on some architectures there is more mapped memory
 * than available in mem_map which pfn_valid checks
@@ -269,10 +273,10 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 *
 * RED-PEN: vmalloc is not supported right now.
 */
if (!pfn_valid(vma->vm_pgoff))
if (!pfn_valid(pfn))
return -EIO;
val = (u64)vma->vm_pgoff << PAGE_SHIFT;
vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;

vma->vm_pgoff = pfn;
return mmap_mem(file, vma);
}
@@ -275,9 +275,9 @@ static int __init ns558_init(void)

static void __exit ns558_exit(void)
{
struct ns558 *ns558;
struct ns558 *ns558, *safe;

list_for_each_entry(ns558, &ns558_list, node) {
list_for_each_entry_safe(ns558, safe, &ns558_list, node) {
gameport_unregister_port(ns558->gameport);
release_region(ns558->io & ~(ns558->size - 1), ns558->size);
kfree(ns558);
@@ -42,7 +42,7 @@
#include "wbsd.h"

#define DRIVER_NAME "wbsd"
#define DRIVER_VERSION "1.2"
#define DRIVER_VERSION "1.3"

#ifdef CONFIG_MMC_DEBUG
#define DBG(x...) \
@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev, NULL);
e1000_clean_tx_irq(adapter);
enable_irq(adapter->pdev->irq);
}
#endif
@@ -130,12 +130,11 @@ struct sixpack {

#define AX25_6PACK_HEADER_LEN 0

static void sp_start_tx_timer(struct sixpack *);
static void sixpack_decode(struct sixpack *, unsigned char[], int);
static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);

/*
 * perform the persistence/slottime algorithm for CSMA access. If the
 * Perform the persistence/slottime algorithm for CSMA access. If the
 * persistence check was successful, write the data to the serial driver.
 * Note that in case of DAMA operation, the data is not sent here.
 */
@@ -143,7 +142,7 @@ static int encode_sixpack(unsigned char *, unsigned char *, int, unsigned char);
static void sp_xmit_on_air(unsigned long channel)
{
struct sixpack *sp = (struct sixpack *) channel;
int actual;
int actual, when = sp->slottime;
static unsigned char random;

random = random * 17 + 41;
@@ -159,20 +158,10 @@ static void sp_xmit_on_air(unsigned long channel)
sp->tty->driver->write(sp->tty, &sp->led_state, 1);
sp->status2 = 0;
} else
sp_start_tx_timer(sp);
mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100);
}

/* ----> 6pack timer interrupt handler and friends. <---- */
static void sp_start_tx_timer(struct sixpack *sp)
{
int when = sp->slottime;

del_timer(&sp->tx_t);
sp->tx_t.data = (unsigned long) sp;
sp->tx_t.function = sp_xmit_on_air;
sp->tx_t.expires = jiffies + ((when + 1) * HZ) / 100;
add_timer(&sp->tx_t);
}

/* Encapsulate one AX.25 frame and stuff into a TTY queue. */
static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
@@ -243,8 +232,7 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
sp->xleft = count;
sp->xhead = sp->xbuff;
sp->status2 = count;
if (sp->duplex == 0)
sp_start_tx_timer(sp);
sp_xmit_on_air((unsigned long)sp);
}

return;
@@ -183,7 +183,7 @@
 * cross a page boundy.
 */
#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
#define VIRTX_LEN (sizeof(void *) * DC395x_MAX_SG_LISTENTRY)


struct SGentry {
u32 address; /* bus! address */
@@ -235,7 +235,6 @@ struct ScsiReqBlk {
u8 sg_count; /* No of HW sg entries for this request */
u8 sg_index; /* Index of HW sg entry for this request */
u32 total_xfer_length; /* Total number of bytes remaining to be transfered */
void **virt_map;
unsigned char *virt_addr; /* Virtual address of current transfer position */

/*
@@ -1022,14 +1021,14 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
reqlen, cmd->request_buffer, cmd->use_sg,
srb->sg_count);

srb->virt_addr = page_address(sl->page);
for (i = 0; i < srb->sg_count; i++) {
u32 seglen = (u32)sg_dma_len(sl + i);
sgp[i].address = (u32)sg_dma_address(sl + i);
u32 busaddr = (u32)sg_dma_address(&sl[i]);
u32 seglen = (u32)sl[i].length;
sgp[i].address = busaddr;
sgp[i].length = seglen;
srb->total_xfer_length += seglen;
srb->virt_map[i] = kmap(sl[i].page);
}
srb->virt_addr = srb->virt_map[0];
sgp += srb->sg_count - 1;

/*
@@ -1976,7 +1975,6 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
int segment = cmd->use_sg;
u32 xferred = srb->total_xfer_length - left; /* bytes transfered */
struct SGentry *psge = srb->segment_x + srb->sg_index;
void **virt = srb->virt_map;

dprintkdbg(DBG_0,
"sg_update_list: Transfered %i of %i bytes, %i remain\n",
@@ -2016,16 +2014,16 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)

/* We have to walk the scatterlist to find it */
sg = (struct scatterlist *)cmd->request_buffer;
idx = 0;
while (segment--) {
unsigned long mask =
~((unsigned long)sg->length - 1) & PAGE_MASK;
if ((sg_dma_address(sg) & mask) == (psge->address & mask)) {
srb->virt_addr = virt[idx] + (psge->address & ~PAGE_MASK);
srb->virt_addr = (page_address(sg->page)
+ psge->address -
(psge->address & PAGE_MASK));
return;
}
++sg;
++idx;
}

dprintkl(KERN_ERR, "sg_update_list: sg_to_virt failed\n");
@@ -2151,7 +2149,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
}
/*
 * calculate all the residue data that not yet transfered
 * calculate all the residue data that not yet tranfered
 * SCSI transfer counter + left in SCSI FIFO data
 *
 * .....TRM_S1040_SCSI_COUNTER (24bits)
@@ -3269,7 +3267,6 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct scsi_cmnd *cmd = srb->cmd;
enum dma_data_direction dir = cmd->sc_data_direction;
if (cmd->use_sg && dir != PCI_DMA_NONE) {
int i;
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3279,8 +3276,6 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
cmd->use_sg, cmd->request_buffer);
/* unmap the sg segments */
for (i = 0; i < srb->sg_count; i++)
kunmap(virt_to_page(srb->virt_map[i]));
pci_unmap_sg(acb->dev,
(struct scatterlist *)cmd->request_buffer,
cmd->use_sg, dir);
@@ -3327,7 +3322,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,

if (cmd->use_sg) {
struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
ptr = (struct ScsiInqData *)(srb->virt_map[0] + sg->offset);
ptr = (struct ScsiInqData *)(page_address(sg->page) + sg->offset);
} else {
ptr = (struct ScsiInqData *)(cmd->request_buffer);
}
@@ -4262,9 +4257,8 @@ static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;

for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
kfree(acb->srb_array[i].segment_x);

vfree(acb->srb_array[0].virt_map);
if (acb->srb_array[i].segment_x)
kfree(acb->srb_array[i].segment_x);
}


@@ -4280,12 +4274,9 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
int srb_idx = 0;
unsigned i = 0;
struct SGentry *ptr;
void **virt_array;

for (i = 0; i < DC395x_MAX_SRB_CNT; i++) {
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
acb->srb_array[i].virt_map = NULL;
}

dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
while (pages--) {
@@ -4306,19 +4297,6 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
ptr + (i * DC395x_MAX_SG_LISTENTRY);
else
dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");

virt_array = vmalloc((DC395x_MAX_SRB_CNT + 1) * DC395x_MAX_SG_LISTENTRY * sizeof(void*));

if (!virt_array) {
adapter_sg_tables_free(acb);
return 1;
}

for (i = 0; i < DC395x_MAX_SRB_CNT + 1; i++) {
acb->srb_array[i].virt_map = virt_array;
virt_array += DC395x_MAX_SG_LISTENTRY;
}

return 0;
}
@@ -907,9 +907,13 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
raptorFlag = TRUE;
}


if (pci_request_regions(pDev, "dpt_i2o")) {
PERROR("dpti: adpt_config_hba: pci request region failed\n");
return -EINVAL;
}
base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
if (!base_addr_virt) {
pci_release_regions(pDev);
PERROR("dpti: adpt_config_hba: io remap failed\n");
return -EINVAL;
}
@@ -919,6 +923,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
if (!msg_addr_virt) {
PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
iounmap(base_addr_virt);
pci_release_regions(pDev);
return -EINVAL;
}
} else {
@@ -932,6 +937,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
iounmap(msg_addr_virt);
}
iounmap(base_addr_virt);
pci_release_regions(pDev);
return -ENOMEM;
}
memset(pHba, 0, sizeof(adpt_hba));
@@ -1027,6 +1033,7 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
up(&adpt_configuration_lock);

iounmap(pHba->base_addr_virt);
pci_release_regions(pHba->pDev);
if(pHba->msg_addr_virt != pHba->base_addr_virt){
iounmap(pHba->msg_addr_virt);
}
@@ -385,6 +385,7 @@ int ata_scsi_error(struct Scsi_Host *host)
 * appropriate place
 */
host->host_failed--;
INIT_LIST_HEAD(&host->eh_cmd_q);

DPRINTK("EXIT\n");
return 0;
@@ -468,7 +468,7 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
for (i = 0; i < last; i++) {
buf[idx++] = cpu_to_le32(sg_dma_address(&sg[i]));
buf[idx++] = cpu_to_le32(sg_dma_len(&sg[i]));
total_len += sg[i].length;
total_len += sg_dma_len(&sg[i]);
}
buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
sgt_len = idx * 4;
@@ -336,9 +336,23 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
unsigned long flags;
const int size = sizeof(struct scsi_target)
+ shost->transportt->target_size;
struct scsi_target *starget = kmalloc(size, GFP_ATOMIC);
struct scsi_target *starget;
struct scsi_target *found_target;

/*
 * Obtain the real parent from the transport. The transport
 * is allowed to fail (no error) if there is nothing at that
 * target id.
 */
if (shost->transportt->target_parent) {
spin_lock_irqsave(shost->host_lock, flags);
parent = shost->transportt->target_parent(shost, channel, id);
spin_unlock_irqrestore(shost->host_lock, flags);
if (!parent)
return NULL;
}

starget = kmalloc(size, GFP_KERNEL);
if (!starget) {
printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
return NULL;
@@ -1022,6 +1022,23 @@ static int fc_rport_match(struct attribute_container *cont,
return &i->rport_attr_cont.ac == cont;
}


/*
 * Must be called with shost->host_lock held
 */
static struct device *fc_target_parent(struct Scsi_Host *shost,
int channel, uint id)
{
struct fc_rport *rport;

list_for_each_entry(rport, &fc_host_rports(shost), peers)
if ((rport->channel == channel) &&
(rport->scsi_target_id == id))
return &rport->dev;

return NULL;
}

struct scsi_transport_template *
fc_attach_transport(struct fc_function_template *ft)
{
@@ -1057,6 +1074,8 @@ fc_attach_transport(struct fc_function_template *ft)

/* Transport uses the shost workq for scsi scanning */
i->t.create_work_queue = 1;

i->t.target_parent = fc_target_parent;

/*
 * Setup SCSI Target Attributes.
@@ -717,6 +717,9 @@ static void pxafb_enable_controller(struct pxafb_info *fbi)
DPRINTK("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2);
DPRINTK("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3);

/* enable LCD controller clock */
pxa_set_cken(CKEN16_LCD, 1);

/* Sequence from 11.7.10 */
LCCR3 = fbi->reg_lccr3;
LCCR2 = fbi->reg_lccr2;
@@ -750,6 +753,9 @@ static void pxafb_disable_controller(struct pxafb_info *fbi)

schedule_timeout(20 * HZ / 1000);
remove_wait_queue(&fbi->ctrlr_wait, &wait);

/* disable LCD controller clock */
pxa_set_cken(CKEN16_LCD, 0);
}

/*
@@ -1299,8 +1305,6 @@ int __init pxafb_probe(struct device *dev)
ret = -ENOMEM;
goto failed;
}
/* enable LCD controller clock */
pxa_set_cken(CKEN16_LCD, 1);

ret = request_irq(IRQ_LCD, pxafb_handle_irq, SA_INTERRUPT, "LCD", fbi);
if (ret) {
@@ -593,7 +593,7 @@ void w1_search(struct w1_master *dev, w1_slave_found_callback cb)
 * Return 0 - device(s) present, 1 - no devices present.
 */
if (w1_reset_bus(dev)) {
dev_info(&dev->dev, "No devices present on the wire.\n");
dev_dbg(&dev->dev, "No devices present on the wire.\n");
break;
}
@@ -593,6 +593,9 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode)
 */
inode->i_uid = current->fsuid;
inode->i_mode = mode;
/* Make inode invalid - just in case we are going to drop it before
 * the initialization happens */
INODE_PKEY(inode)->k_objectid = 0;

if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
@@ -12,6 +12,7 @@
 * Changelog:
 *  11-Sep-2004 BJD  Created file
 *  21-Sep-2004 BJD  Updated port info
 *  09-Aug-2005 BJD  Renamed s3c2410_report_oc s3c2410_usb_report_oc
 */

#ifndef __ASM_ARCH_USBCONTROL_H
@@ -35,7 +36,7 @@ struct s3c2410_hcd_info {
void (*report_oc)(struct s3c2410_hcd_info *, int ports);
};

static void inline s3c2410_report_oc(struct s3c2410_hcd_info *info, int ports)
static void inline s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
{
if (info->report_oc != NULL) {
(info->report_oc)(info, ports);
@@ -188,12 +188,18 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
/*
 * - extended small page/tiny page
 */
#define PTE_EXT_XN (1 << 0) /* v6 */
#define PTE_EXT_AP_MASK (3 << 4)
#define PTE_EXT_AP0 (1 << 4)
#define PTE_EXT_AP1 (2 << 4)
#define PTE_EXT_AP_UNO_SRO (0 << 4)
#define PTE_EXT_AP_UNO_SRW (1 << 4)
#define PTE_EXT_AP_URO_SRW (2 << 4)
#define PTE_EXT_AP_URW_SRW (3 << 4)
#define PTE_EXT_AP_UNO_SRW (PTE_EXT_AP0)
#define PTE_EXT_AP_URO_SRW (PTE_EXT_AP1)
#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
#define PTE_EXT_APX (1 << 9) /* v6 */
#define PTE_EXT_SHARED (1 << 10) /* v6 */
#define PTE_EXT_NG (1 << 11) /* v6 */

/*
 * - small page
@@ -224,6 +230,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
#define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */
#define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */

#ifndef __ASSEMBLY__
@@ -406,7 +406,7 @@ register long __sc6 __asm__ ("r6") = (long) arg3; \
register long __sc7 __asm__ ("r7") = (long) arg4; \
register long __sc0 __asm__ ("r0") = (long) arg5; \
register long __sc1 __asm__ ("r1") = (long) arg6; \
__asm__ __volatile__ ("trapa #0x15" \
__asm__ __volatile__ ("trapa #0x16" \
: "=z" (__sc0) \
: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \
"r" (__sc3), "r" (__sc1) \
@@ -9,6 +9,7 @@

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>

struct netpoll;
@@ -26,6 +27,7 @@ struct netpoll {
struct netpoll_info {
spinlock_t poll_lock;
int poll_owner;
int tries;
int rx_flags;
spinlock_t rx_lock;
struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -60,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
return ret;
}

static inline void netpoll_poll_lock(struct net_device *dev)
static inline void *netpoll_poll_lock(struct net_device *dev)
{
rcu_read_lock(); /* deal with race on ->npinfo */
if (dev->npinfo) {
spin_lock(&dev->npinfo->poll_lock);
dev->npinfo->poll_owner = smp_processor_id();
return dev->npinfo;
}
return NULL;
}

static inline void netpoll_poll_unlock(struct net_device *dev)
static inline void netpoll_poll_unlock(void *have)
{
if (dev->npinfo) {
dev->npinfo->poll_owner = -1;
spin_unlock(&dev->npinfo->poll_lock);
struct netpoll_info *npi = have;

if (npi) {
npi->poll_owner = -1;
spin_unlock(&npi->poll_lock);
}
rcu_read_unlock();
}

#else
#define netpoll_rx(a) 0
#define netpoll_poll_lock(a)
#define netpoll_poll_lock(a) 0
#define netpoll_poll_unlock(a)
#endif
@@ -255,7 +255,7 @@ struct sk_buff {
nohdr:1;
/* 3 bits spare */
__u8 pkt_type;
__u16 protocol;
__be16 protocol;

void (*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
@@ -29,6 +29,14 @@ struct scsi_transport_template {
struct transport_container target_attrs;
struct transport_container device_attrs;

/*
 * If set, call target_parent prior to allocating a scsi_target,
 * so we get the appropriate parent for the target. This function
 * is required for transports like FC and iSCSI that do not put the
 * scsi_target under scsi_host.
 */
struct device *(*target_parent)(struct Scsi_Host *, int, uint);

/* The size of the specific transport attribute structure (a
 * space of this size will be left at the end of the
 * scsi_* structure */
@@ -308,8 +308,6 @@ struct workqueue_struct *__create_workqueue(const char *name,
struct workqueue_struct *wq;
struct task_struct *p;

BUG_ON(strlen(name) > 10);

wq = kmalloc(sizeof(*wq), GFP_KERNEL);
if (!wq)
return NULL;
@@ -1696,7 +1696,8 @@ static void net_rx_action(struct softirq_action *h)
struct softnet_data *queue = &__get_cpu_var(softnet_data);
unsigned long start_time = jiffies;
int budget = netdev_budget;

void *have;

local_irq_disable();

while (!list_empty(&queue->poll_list)) {
@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)

dev = list_entry(queue->poll_list.next,
struct net_device, poll_list);
netpoll_poll_lock(dev);
have = netpoll_poll_lock(dev);

if (dev->quota <= 0 || dev->poll(dev, &budget)) {
netpoll_poll_unlock(dev);
netpoll_poll_unlock(have);
local_irq_disable();
list_del(&dev->poll_list);
list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
else
dev->quota = dev->weight;
} else {
netpoll_poll_unlock(dev);
netpoll_poll_unlock(have);
dev_put(dev);
local_irq_disable();
}
@@ -33,6 +33,7 @@
#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
#define MAX_RETRIES 20000

static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
int status;
struct netpoll_info *npinfo;

repeat:
if(!np || !np->dev || !netif_running(np->dev)) {
if (!np || !np->dev || !netif_running(np->dev)) {
__kfree_skb(skb);
return;
}

/* avoid recursion */
npinfo = np->dev->npinfo;

/* avoid recursion */
if (npinfo->poll_owner == smp_processor_id() ||
np->dev->xmit_lock_owner == smp_processor_id()) {
if (np->drop)
@@ -265,30 +266,37 @@ repeat:
return;
}

spin_lock(&np->dev->xmit_lock);
np->dev->xmit_lock_owner = smp_processor_id();
do {
npinfo->tries--;
spin_lock(&np->dev->xmit_lock);
np->dev->xmit_lock_owner = smp_processor_id();

/*
 * network drivers do not expect to be called if the queue is
 * stopped.
 */
if (netif_queue_stopped(np->dev)) {
/*
 * network drivers do not expect to be called if the queue is
 * stopped.
 */
if (netif_queue_stopped(np->dev)) {
np->dev->xmit_lock_owner = -1;
spin_unlock(&np->dev->xmit_lock);
netpoll_poll(np);
udelay(50);
continue;
}

status = np->dev->hard_start_xmit(skb, np->dev);
np->dev->xmit_lock_owner = -1;
spin_unlock(&np->dev->xmit_lock);

netpoll_poll(np);
goto repeat;
}
/* success */
if(!status) {
npinfo->tries = MAX_RETRIES; /* reset */
return;
}

status = np->dev->hard_start_xmit(skb, np->dev);
np->dev->xmit_lock_owner = -1;
spin_unlock(&np->dev->xmit_lock);

/* transmit busy */
if(status) {
/* transmit busy */
netpoll_poll(np);
goto repeat;
}
udelay(50);
} while (npinfo->tries > 0);
}

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
unsigned char *arp_ptr;
int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
u32 sip, tip;
unsigned long flags;
struct sk_buff *send_skb;
struct netpoll *np = NULL;

spin_lock_irqsave(&npinfo->rx_lock, flags);
if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
np = npinfo->rx_np;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);

if (!np)
return;
@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
if (!npinfo)
goto release;

npinfo->rx_flags = 0;
npinfo->rx_np = NULL;
npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
npinfo->poll_owner = -1;
npinfo->tries = MAX_RETRIES;
npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
} else
npinfo = ndev->npinfo;
@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
npinfo->rx_np = np;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}

/* fill up the skb queue */
refill_skbs();

/* last thing to do is link it to the net device structure */
ndev->npinfo = npinfo;

/* avoid racing with NAPI reading npinfo */
synchronize_rcu();

return 0;

release:
@@ -1876,15 +1876,6 @@ static inline unsigned int dn_current_mss(struct sock *sk, int flags)
return mss_now;
}

static int dn_error(struct sock *sk, int flags, int err)
{
if (err == -EPIPE)
err = sock_error(sk) ? : -EPIPE;
if (err == -EPIPE && !(flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
return err;
}

static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size)
{
@@ -2045,7 +2036,7 @@ out:
return sent ? sent : err;

out_err:
err = dn_error(sk, flags, err);
err = sk_stream_error(sk, flags, err);
release_sock(sk);
return err;
}
@@ -1370,15 +1370,21 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)

if (skb->len > cur_mss) {
int old_factor = tcp_skb_pcount(skb);
int new_factor;
int diff;

if (tcp_fragment(sk, skb, cur_mss, cur_mss))
return -ENOMEM; /* We'll try again later. */

/* New SKB created, account for it. */
new_factor = tcp_skb_pcount(skb);
tp->packets_out -= old_factor - new_factor;
tp->packets_out += tcp_skb_pcount(skb->next);
diff = old_factor - tcp_skb_pcount(skb) -
tcp_skb_pcount(skb->next);
tp->packets_out -= diff;

if (diff > 0) {
tp->fackets_out -= diff;
if ((int)tp->fackets_out < 0)
tp->fackets_out = 0;
}
}

/* Collapse two adjacent packets if worthwhile and we can. */